/**
 * Constructor options for the ChatGroq chat model.
 * Extends BaseChatModelParams (see hierarchy documented below).
 */
interface ChatGroqInput {
    /** The Groq API key to use for requests. Defaults to process.env.GROQ_API_KEY. */
    apiKey?: string;
    /** Override the default base URL for the API. */
    baseUrl?: string;
    cache?: boolean | BaseCache<Generation[]>;
    /** @deprecated Use callbacks instead. */
    callbackManager?: CallbackManager;
    callbacks?: Callbacks;
    /** Default headers included with every request. */
    defaultHeaders?: Record<string, string>;
    /** Default query parameters included with every request. */
    defaultQuery?: Record<string, string>;
    /**
     * Whether to disable streaming. If streaming is bypassed, then stream()
     * will defer to invoke().
     * - If true, will always bypass streaming case.
     * - If false (default), will always use streaming case if available.
     */
    disableStreaming?: boolean;
    /** Custom fetch function implementation. */
    fetch?: ((...args: any) => any);
    /** HTTP agent used to manage connections. */
    httpAgent?: any;
    /**
     * The maximum number of concurrent calls that can be made.
     * Defaults to Infinity, which means no limit.
     */
    maxConcurrency?: number;
    /**
     * The maximum number of retries for a single call, with exponential
     * backoff between attempts. Defaults to 6.
     */
    maxRetries?: number;
    /** The maximum number of tokens the model can process in a single response. */
    maxTokens?: number;
    metadata?: Record<string, unknown>;
    /** The name of the model to use. Defaults to "mixtral-8x7b-32768". */
    model?: string;
    /** The name of the model to use. Alias for model. Defaults to "mixtral-8x7b-32768". */
    modelName?: string;
    /**
     * Custom handler for failed attempts. Takes the originally thrown error
     * as input, and should itself throw if that error is not retryable.
     */
    onFailedAttempt?: FailedAttemptHandler;
    /**
     * Up to 4 sequences where the API will stop generating further tokens.
     * The returned text will not contain the stop sequence.
     * Alias for stopSequences.
     */
    stop?: null | string | string[];
    /**
     * Up to 4 sequences where the API will stop generating further tokens.
     * The returned text will not contain the stop sequence.
     */
    stopSequences?: string[];
    /** Whether or not to stream responses. */
    streaming?: boolean;
    tags?: string[];
    /** The temperature to use for sampling. Defaults to 0.7. */
    temperature?: number;
    /** The maximum amount of time (in milliseconds) the client will wait for a response. */
    timeout?: number;
    verbose?: boolean;
}

Hierarchy

  • BaseChatModelParams
    • ChatGroqInput

Properties

apiKey?: string

The Groq API key to use for requests.

Default: process.env.GROQ_API_KEY
baseUrl?: string

Override the default base URL for the API

cache?: boolean | BaseCache<Generation[]>
callbackManager?: CallbackManager

Deprecated: use callbacks instead.

callbacks?: Callbacks
defaultHeaders?: Record<string, string>

Default headers included with every request

defaultQuery?: Record<string, string>

Default query parameters included with every request

disableStreaming?: boolean

Whether to disable streaming.

If streaming is bypassed, then stream() will defer to invoke().

  • If true, will always bypass streaming case.
  • If false (default), will always use streaming case if available.
fetch?: ((...args: any) => any)

Custom fetch function implementation

httpAgent?: any

HTTP agent used to manage connections

maxConcurrency?: number

The maximum number of concurrent calls that can be made. Defaults to Infinity, which means no limit.

maxRetries?: number

The maximum number of retries that can be made for a single call, with an exponential backoff between each attempt. Defaults to 6.

maxTokens?: number

The maximum number of tokens that the model can process in a single response. This limit ensures computational efficiency and resource management.

metadata?: Record<string, unknown>
model?: string

The name of the model to use.

Default: "mixtral-8x7b-32768"
modelName?: string

The name of the model to use. Alias for model

Default: "mixtral-8x7b-32768"
onFailedAttempt?: FailedAttemptHandler

Custom handler to handle failed attempts. Takes the originally thrown error object as input, and should itself throw an error if the input error is not retryable.

stop?: null | string | string[]

Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. Alias for stopSequences

stopSequences?: string[]

Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.

streaming?: boolean

Whether or not to stream responses.

tags?: string[]
temperature?: number

The temperature to use for sampling.

Default: 0.7
timeout?: number

The maximum amount of time (in milliseconds) the client will wait for a response

verbose?: boolean