"use strict";
// Webpack chunk 25280 — LLM model/provider metadata, prompt-settings UI config,
// zod schemas for chat messages / tool calls, and tool-choice translation
// between provider families. Minified identifiers are preserved because the
// export map below (o.d) and cross-references depend on them; comments name
// each binding. NOTE(review): this is build output — the real fix belongs in
// the original source, this is a reconstruction of the same module.
(self.webpackChunk_N_E = self.webpackChunk_N_E || []).push([[25280], {
  25280: function (e, t, o) {
    // Public export map (minified export name -> local binding).
    o.d(t, {
      Dk: function () { return er },  // model metadata map
      FM: function () { return eO },  // normalize tool-call payloads
      G7: function () { return eG },  // translate toolChoice between families
      Hp: function () { return eo },  // is Google model
      Om: function () { return h },   // Anthropic tool-choice enum
      PC: function () { return f },   // chat-role enum
      PI: function () { return en },  // is Cohere model
      QA: function () { return eu },  // tool choices for a family
      QH: function () { return ea },  // is Mistral model
      TU: function () { return P },   // OpenAI tool-choice enum
      Zg: function () { return $ },   // is Anthropic-on-Bedrock model
      gR: function () { return et },  // is o1 model
      iw: function () { return Y },   // settings config for a model
      l8: function () { return K },   // is Anthropic model
      mf: function () { return eM },  // "staging"
      t4: function () { return A },   // is chat-message array
      tR: function () { return ev },  // default prompt settings
      u0: function () { return eN },  // "production"
      vw: function () { return eC },  // default chat messages
      xG: function () { return ei },  // is Groq LLaMA model
      yZ: function () { return ey },  // default prompt state
      yp: function () { return I }    // all-models enum
    });

    // Enum holder objects (filled in below, TypeScript-enum style).
    var f, h, P, x, b, v, C, y, O, z, G, N, M, w, D, I;
    var S = o(70385); // project module providing PromptType
    var j = o(1604);  // zod

    // t4: runtime guard — true when `e` is an array of {role, content}
    // objects whose role is one of the chat-role enum values.
    function A(e) {
      return !!Array.isArray(e) && e.every(e => {
        if ("object" != typeof e || null === e) return !1;
        let { role: t, content: o } = e;
        return "string" == typeof t && Object.values(f).includes(t) && "string" == typeof o;
      });
    }

    // PC: chat message roles.
    f = f || {};
    f.System = "system";
    f.User = "user";
    f.Assistant = "assistant";

    // Zod schema for a single chat message.
    const L = j.z.object({ role: j.z.nativeEnum(f), content: j.z.string() });

    // Om: Anthropic tool-choice modes.
    h = h || {};
    h.Auto = "auto";
    h.Any = "any";
    h.Tool = "tool";
    const q = j.z.object({ modelFamily: j.z.literal("anthropic"), choice: j.z.nativeEnum(h), functionName: j.z.string().optional() });

    // TU: OpenAI tool-choice modes.
    P = P || {};
    P.None = "none";
    P.Auto = "auto";
    P.Function = "function";
    P.Required = "required";
    const B = j.z.object({ modelFamily: j.z.literal("openai"), choice: j.z.nativeEnum(P), functionName: j.z.string().optional() });

    // Google tool-choice modes.
    x = x || {};
    x.Auto = "auto";
    x.Any = "any";
    x.None = "none";
    const R = j.z.object({ modelFamily: j.z.literal("google"), choice: j.z.nativeEnum(x), functionName: j.z.string().optional() });

    // Mistral tool-choice modes.
    b = b || {};
    b.Auto = "auto";
    b.Any = "any";
    b.None = "none";
    const U = j.z.object({ modelFamily: j.z.literal("mistral"), choice: j.z.nativeEnum(b), functionName: j.z.string().optional() });

    // Cohere tool-choice modes (auto only).
    v = v || {};
    v.Auto = "auto";
    const E = j.z.object({ modelFamily: j.z.literal("cohere"), choice: j.z.nativeEnum(v), functionName: j.z.string().optional() });

    // Groq LLaMA tool-choice modes (none only).
    // NOTE(review): "groqLlamma" spelling is kept — it is a runtime key used
    // consistently throughout this module and by callers.
    C = C || {};
    C.None = "none";
    const F = j.z.object({ modelFamily: j.z.literal("groqLlamma"), choice: j.z.nativeEnum(C), functionName: j.z.string().optional() });

    // Prompt-settings schema; toolChoice is a union over all family schemas.
    const J = j.z.object({
      temperature: j.z.number().optional(),
      maxTokens: j.z.number().optional(),
      topP: j.z.number().optional(),
      frequencyPenalty: j.z.number().optional(),
      presencePenalty: j.z.number().optional(),
      jsonMode: j.z.boolean().optional(),
      toolChoice: B.or(q).or(R).or(U).or(E).or(F)
    });

    // UI settings templates (slider ranges + toggles) per model family.
    // H: OpenAI default; W: o1 family; _: Anthropic; X: Google; Q: Mistral;
    // V: Cohere; Z: Groq LLaMA.
    const H = { range: { temperature: { label: "Temperature", min: 0, max: 1, step: .01 }, maxTokens: { label: "Max Tokens", min: 1, max: 1e4, step: 1 }, topP: { label: "Top P", min: 0, max: 1, step: .01 }, frequencyPenalty: { label: "Frequency Penalty", min: 0, max: 1, step: .01 }, presencePenalty: { label: "Presence Penalty", min: 0, max: 1, step: .01 } }, toggle: { jsonMode: { label: "JSON Mode" } } };
    const W = { range: { maxTokens: { label: "Max Tokens", min: 1, max: 1e5, step: 1 } } };
    const _ = { range: { temperature: { label: "Temperature", min: 0, max: 1, step: .01 }, maxTokens: { label: "Max Tokens", min: 1, max: 1e4, step: 1 }, topP: { label: "Top P", min: 0, max: 1, step: .01 } } };
    const X = { range: { temperature: { label: "Temperature", min: 0, max: 1, step: .01 }, maxTokens: { label: "Max Tokens", min: 1, max: 1e4, step: 1 }, topP: { label: "Top P", min: 0, max: 1, step: .01 } }, toggle: { jsonMode: { label: "JSON Mode" } } };
    const Q = { range: { temperature: { label: "Temperature", min: 0, max: 1, step: .01 }, maxTokens: { label: "Max Tokens", min: 1, max: 1e4, step: 1 }, topP: { label: "Top P", min: 0, max: 1, step: .01 } }, toggle: { jsonMode: { label: "JSON Mode" } } };
    const V = { range: { temperature: { label: "Temperature", min: 0, max: 1, step: .01 }, maxTokens: { label: "Max Tokens", min: 1, max: 1e4, step: 1 }, topP: { label: "Top P", min: 0, max: 1, step: .01 }, frequencyPenalty: { label: "Frequency Penalty", min: 0, max: 1, step: .01 }, presencePenalty: { label: "Presence Penalty", min: 0, max: 1, step: .01 } }, toggle: { jsonMode: { label: "JSON Mode" } } };
    const Z = { range: { temperature: { label: "Temperature", min: 0, max: 1, step: .01 }, maxTokens: { label: "Max Tokens", min: 1, max: 1e4, step: 1 }, topP: { label: "Top P", min: 0, max: 1, step: .01 } } };

    // iw: settings config for a model id, with the Max Tokens slider capped
    // at the model's maxOutputTokens (when known in `er`).
    // BUG FIX: the original wrote `t.range.maxTokens.max = o.maxOutputTokens`
    // directly into the shared template objects above, so one model's cap
    // leaked into every later lookup resolving to the same template. We now
    // return a shallow-copied override and never mutate the templates.
    const Y = e => {
      const t = K(e) || $(e) ? _ : eo(e) ? X : ea(e) ? Q : en(e) ? V : ei(e) ? Z : et(e) ? W : H;
      const o = er[e];
      if (!(null == o ? void 0 : o.maxOutputTokens)) return t;
      return {
        ...t,
        range: {
          ...t.range,
          maxTokens: { ...t.range.maxTokens, max: o.maxOutputTokens }
        }
      };
    };

    // Anthropic (direct API) model ids + guard l8.
    y = y || {};
    y.Claude3Haiku = "claude-3-haiku-20240307";
    y.Claude3Sonnet = "claude-3-sonnet-20240229";
    y.Claude3Opus = "claude-3-opus-20240229";
    y.Claude35Sonnet20240620 = "claude-3-5-sonnet-20240620";
    y.Claude35Sonnet20241022 = "claude-3-5-sonnet-20241022";
    y.Claude35SonnetLatest = "claude-3-5-sonnet-latest";
    const K = e => Object.values(y).includes(e);

    // Anthropic-on-Bedrock model ids + guard Zg.
    O = O || {};
    O.Claude3HaikuBedrock = "anthropic.claude-3-haiku-20240307-v1:0";
    O.Claude3SonnetBedrock = "anthropic.claude-3-sonnet-20240229-v1:0";
    O.Claude3OpusBedrock = "anthropic.claude-3-opus-20240229-v1:0";
    const $ = e => Object.values(O).includes(e);

    // OpenAI model ids.
    z = z || {};
    z.GPT35Turbo = "gpt-3.5-turbo";
    z.GPT35Turbo0125 = "gpt-3.5-turbo-0125";
    z.GPT35Turbo1106 = "gpt-3.5-turbo-1106";
    z.GPT4Turbo = "gpt-4-turbo";
    z.GPT4Turbo20240409 = "gpt-4-turbo-2024-04-09";
    z.GPT4TurboPreview = "gpt-4-turbo-preview";
    z.GPT4Turbo0125Preview = "gpt-4-0125-preview";
    z.GPT4Turbo1106Preview = "gpt-4-1106-preview";
    z.GPT4 = "gpt-4";
    z.GPT40613 = "gpt-4-0613";
    z.GPT4o = "gpt-4o";
    z.GPT4o20240513 = "gpt-4o-2024-05-13";
    z.GPT4oMini = "gpt-4o-mini";
    z.GPT4oMini20240718 = "gpt-4o-mini-2024-07-18";
    z.O1Preview = "o1-preview";
    z.O1Mini = "o1-mini";
    z.O1Preview20240912 = "o1-preview-2024-09-12";
    z.O1Mini20240912 = "o1-mini-2024-09-12";

    // o1 subset (different settings template W) + guards ee / gR.
    G = G || {};
    G.O1Preview = "o1-preview";
    G.O1Mini = "o1-mini";
    G.O1Preview20240912 = "o1-preview-2024-09-12";
    G.O1Mini20240912 = "o1-mini-2024-09-12";
    const ee = e => Object.values(z).includes(e);
    const et = e => Object.values(G).includes(e);

    // Google model ids + guard Hp.
    N = N || {};
    N.Gemini15Pro = "gemini-1.5-pro";
    N.GeminiFlash = "gemini-1.5-flash";
    N.Gemini10Pro = "gemini-1.0-pro";
    N.GeminiExperimental = "gemini-1.5-pro-exp-0801";
    const eo = e => Object.values(N).includes(e);

    // Mistral model ids + guard QH.
    M = M || {};
    M.Mixtral8X22B = "open-mixtral-8x22b";
    M.Mixtral8X22B2404 = "open-mixtral-8x22b-2404";
    M.MistralSmall = "mistral-small-latest";
    M.MistralSmall2402 = "mistral-small-2402";
    M.MistralMedium = "mistral-medium-latest";
    M.MistralMedium2312 = "mistral-medium-2312";
    M.MistralLarge = "mistral-large-latest";
    M.MistralLarge2402 = "mistral-large-2402";
    M.MistralCodestralLatest = "codestral-latest";
    M.MistralCodestral2402 = "codestral-2405";
    const ea = e => Object.values(M).includes(e);

    // Cohere model ids + guard PI.
    w = w || {};
    w.CommandRPlus = "command-r-plus";
    w.CommandR = "command-r";
    w.CommandLight = "command-light";
    const en = e => Object.values(w).includes(e);

    // Groq LLaMA model ids + guard xG.
    D = D || {};
    D.Llama38B8192 = "llama3-8b-8192";
    D.Llama370B8192 = "llama3-70b-8192";
    const ei = e => Object.values(D).includes(e);

    // yp: union enum of every supported model id (original member order kept).
    I = I || {};
    I.Claude35Sonnet20240620 = "claude-3-5-sonnet-20240620";
    I.Claude35Sonnet20241022 = "claude-3-5-sonnet-20241022";
    I.Claude35SonnetLatest = "claude-3-5-sonnet-latest";
    I.Claude3Sonnet = "claude-3-sonnet-20240229";
    I.Claude3Opus = "claude-3-opus-20240229";
    I.Claude3Haiku = "claude-3-haiku-20240307";
    I.Claude3HaikuBedrock = "anthropic.claude-3-haiku-20240307-v1:0";
    I.Claude3SonnetBedrock = "anthropic.claude-3-sonnet-20240229-v1:0";
    I.Claude3OpusBedrock = "anthropic.claude-3-opus-20240229-v1:0";
    I.O1Preview = "o1-preview";
    I.O1Mini = "o1-mini";
    I.O1Preview20240912 = "o1-preview-2024-09-12";
    I.O1Mini20240912 = "o1-mini-2024-09-12";
    I.GPT4o = "gpt-4o";
    I.GPT4o20240513 = "gpt-4o-2024-05-13";
    I.GPT4Turbo = "gpt-4-turbo";
    I.GPT4Turbo20240409 = "gpt-4-turbo-2024-04-09";
    I.GPT4TurboPreview = "gpt-4-turbo-preview";
    I.GPT4Turbo0125Preview = "gpt-4-0125-preview";
    I.GPT4Turbo1106Preview = "gpt-4-1106-preview";
    I.GPT4 = "gpt-4";
    I.GPT40613 = "gpt-4-0613";
    I.GPT4oMini = "gpt-4o-mini";
    I.GPT4oMini20240718 = "gpt-4o-mini-2024-07-18";
    I.GPT35Turbo = "gpt-3.5-turbo";
    I.GPT35Turbo0125 = "gpt-3.5-turbo-0125";
    I.GPT35Turbo1106 = "gpt-3.5-turbo-1106";
    I.Gemini15Pro = "gemini-1.5-pro";
    I.GeminiFlash = "gemini-1.5-flash";
    I.Gemini10Pro = "gemini-1.0-pro";
    I.GeminiExperimental = "gemini-1.5-pro-exp-0801";
    I.Mixtral8X22B = "open-mixtral-8x22b";
    I.Mixtral8X22B2404 = "open-mixtral-8x22b-2404";
    I.MistralSmall = "mistral-small-latest";
    I.MistralSmall2402 = "mistral-small-2402";
    I.MistralMedium = "mistral-medium-latest";
    I.MistralMedium2312 = "mistral-medium-2312";
    I.MistralLarge = "mistral-large-latest";
    I.MistralLarge2402 = "mistral-large-2402";
    I.MistralCodestralLatest = "codestral-latest";
    I.MistralCodestral2402 = "codestral-2405";
    I.CommandRPlus = "command-r-plus";
    I.CommandR = "command-r";
    I.CommandLight = "command-light";
    I.Llama38B8192 = "llama3-8b-8192";
    I.Llama370B8192 = "llama3-70b-8192";

    // Dk: per-model metadata (token limits, pricing per token, display copy).
    // NOTE(review): "gpt-4-turbo-2024-04-09" is in the enums but has no entry
    // here; Y tolerates that via its null check — confirm whether intentional.
    const er = {
      "o1-preview": { maxInputTokens: 128e3, maxOutputTokens: 32768, format: "openai", flavor: "chat", multimodal: !1, trainingData: "Up to Oct 2023", inputCostPerToken: 15e-6, outputCostPerToken: 6e-5, displayName: "O1 Preview", description: "Points to the most recent snapshot of the o1 model: o1-preview-2024-09-12" },
      "o1-mini": { maxInputTokens: 128e3, maxOutputTokens: 65536, format: "openai", flavor: "chat", multimodal: !1, trainingData: "Up to Oct 2023", inputCostPerToken: 3e-6, outputCostPerToken: 12e-6, displayName: "O1 Mini", description: "Points to the most recent o1-mini snapshot: o1-mini-2024-09-12" },
      "o1-preview-2024-09-12": { maxInputTokens: 128e3, maxOutputTokens: 32768, format: "openai", flavor: "chat", multimodal: !1, trainingData: "Up to Oct 2023", inputCostPerToken: 15e-6, outputCostPerToken: 6e-5, displayName: "O1 Preview 2024-09-12", description: "Latest o1 model snapshot" },
      "o1-mini-2024-09-12": { maxInputTokens: 128e3, maxOutputTokens: 65536, format: "openai", flavor: "chat", multimodal: !1, trainingData: "Up to Oct 2023", inputCostPerToken: 3e-6, outputCostPerToken: 12e-6, displayName: "O1 Mini 2024-09-12", description: "Latest o1-mini model snapshot" },
      // NOTE(review): description below looks copy-pasted from gpt-4o — confirm upstream before changing user-facing copy.
      "gpt-4o-mini": { maxInputTokens: 128e3, maxOutputTokens: 16e3, format: "openai", flavor: "chat", multimodal: !0, trainingData: "Up to Oct 2023", inputCostPerToken: 15e-8, outputCostPerToken: 6e-7, displayName: "GPT 4o mini", description: "Our high-intelligence flagship model for complex, multi-step tasks. GPT-4o is cheaper and faster than GPT-4 Turbo. Currently points to gpt-4o-2024-05-13." },
      "gpt-4o-mini-2024-07-18": { maxInputTokens: 128e3, maxOutputTokens: 16e3, format: "openai", flavor: "chat", multimodal: !0, trainingData: "Up to Oct 2023", inputCostPerToken: 15e-8, outputCostPerToken: 6e-7, displayName: "GPT 4o mini 20240718", description: "gpt-4o currently points to this version." },
      "gpt-3.5-turbo": { maxInputTokens: 16385, maxOutputTokens: 4096, format: "openai", flavor: "chat", multimodal: !1, inputCostPerToken: 5e-7, outputCostPerToken: 15e-7, displayName: "GPT 3.5T", trainingData: "Up to Sep 2021", description: "Currently points to gpt-3.5-turbo-0125. GPT-3.5 Turbo models can understand and generate natural language or code and have been optimized for chat using the Chat Completions API but work well for non-chat tasks as well." },
      "gpt-3.5-turbo-0125": { maxInputTokens: 16385, maxOutputTokens: 4096, format: "openai", flavor: "chat", multimodal: !1, inputCostPerToken: 5e-7, outputCostPerToken: 15e-7, displayName: "GPT 3.5T 0125", trainingData: "Up to Sep 2021", description: "The latest GPT-3.5 Turbo model with higher accuracy at responding in requested formats and a fix for a bug which caused a text encoding issue for non-English language function calls. Returns a maximum of 4,096 output tokens." },
      "gpt-3.5-turbo-1106": { maxInputTokens: 16385, maxOutputTokens: 4096, format: "openai", flavor: "chat", multimodal: !1, inputCostPerToken: 1e-6, outputCostPerToken: 2e-6, displayName: "GPT 3.5T 1106", trainingData: "Up to Sep 2021", description: "GPT-3.5 Turbo model with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more. Returns a maximum of 4,096 output tokens." },
      "gpt-4": { maxInputTokens: 8192, maxOutputTokens: 4096, format: "openai", flavor: "chat", multimodal: !1, inputCostPerToken: 3e-5, outputCostPerToken: 6e-5, displayName: "GPT 4", trainingData: "Up to Sep 2021", description: "Currently points to gpt-4-0613. See continuous model upgrades." },
      "gpt-4-0613": { maxInputTokens: 8192, maxOutputTokens: 4096, format: "openai", flavor: "chat", multimodal: !1, inputCostPerToken: 3e-5, outputCostPerToken: 6e-5, displayName: "GPT 4 0613", trainingData: "Up to Sep 2021", description: "Snapshot of gpt-4 from June 13th 2023 with improved function calling support." },
      "gpt-4-turbo": { maxInputTokens: 128e3, maxOutputTokens: 4096, format: "openai", flavor: "chat", multimodal: !0, inputCostPerToken: 1e-5, outputCostPerToken: 3e-5, trainingData: "Up to Dec 2023", displayName: "GPT 4T", description: "The latest GPT-4 Turbo model with vision capabilities. Vision requests can now use JSON mode and function calling. Currently points to gpt-4-turbo-2024-04-09." },
      "gpt-4-turbo-preview": { maxInputTokens: 128e3, maxOutputTokens: 4096, format: "openai", flavor: "chat", multimodal: !0, inputCostPerToken: 1e-5, outputCostPerToken: 3e-5, displayName: "GPT 4 Turbo Preview", trainingData: "Up to Dec 2023", description: "GPT-4 Turbo preview model. Currently points to gpt-4-0125-preview" },
      "gpt-4-0125-preview": { maxInputTokens: 128e3, maxOutputTokens: 4096, format: "openai", flavor: "chat", multimodal: !0, inputCostPerToken: 1e-5, outputCostPerToken: 3e-5, displayName: "GPT 4 0125 Preview", trainingData: "Up to Dec 2023", description: "GPT-4 Turbo preview model intended to reduce cases of 'laziness' where the model doesn't complete a task. Returns a maximum of 4,096 output tokens. Learn more." },
      "gpt-4-1106-preview": { maxInputTokens: 128e3, maxOutputTokens: 4096, format: "openai", flavor: "chat", multimodal: !0, inputCostPerToken: 1e-5, outputCostPerToken: 3e-5, displayName: "GPT 4 1106 Preview", trainingData: "Up to Apr 2023", description: "GPT-4 Turbo preview model featuring improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more. Returns a maximum of 4,096 output tokens. This is a preview model. Learn more." },
      "gpt-4o": { maxInputTokens: 128e3, maxOutputTokens: 4096, format: "openai", flavor: "chat", multimodal: !0, trainingData: "Up to Oct 2023", inputCostPerToken: 3e-7, outputCostPerToken: 12e-7, displayName: "GPT 4o", description: "Our high-intelligence flagship model for complex, multi-step tasks. GPT-4o is cheaper and faster than GPT-4 Turbo. Currently points to gpt-4o-2024-05-13." },
      "gpt-4o-2024-05-13": { maxInputTokens: 128e3, maxOutputTokens: 4096, format: "openai", flavor: "chat", multimodal: !0, trainingData: "Up to Oct 2023", inputCostPerToken: 3e-7, outputCostPerToken: 12e-7, displayName: "GPT 4o 20240513", description: "gpt-4o currently points to this version." },
      "claude-3-5-sonnet-20240620": { maxInputTokens: 2e5, maxOutputTokens: 8192, format: "anthropic", flavor: "chat", multimodal: !0, trainingData: "Apr 2024", inputCostPerToken: 3e-6, outputCostPerToken: 15e-6, displayName: "Claude 3.5 Sonnet", description: "Our most intelligent model to date" },
      "claude-3-5-sonnet-20241022": { maxInputTokens: 2e5, maxOutputTokens: 8192, format: "anthropic", flavor: "chat", multimodal: !0, trainingData: "Apr 2024", inputCostPerToken: 3e-6, outputCostPerToken: 15e-6, displayName: "Claude 3.5 Sonnet", description: "Our most intelligent model to date" },
      "claude-3-5-sonnet-latest": { maxInputTokens: 2e5, maxOutputTokens: 8192, format: "anthropic", flavor: "chat", multimodal: !0, trainingData: "Apr 2024", inputCostPerToken: 3e-6, outputCostPerToken: 15e-6, displayName: "Claude 3.5 Sonnet", description: "Our most intelligent model to date" },
      "claude-3-sonnet-20240229": { maxInputTokens: 2e5, maxOutputTokens: 4096, format: "anthropic", flavor: "chat", multimodal: !0, trainingData: "Aug 2023", inputCostPerToken: 3e-6, outputCostPerToken: 15e-6, displayName: "Claude 3 Sonnet", description: "Balance of intelligence and speed" },
      "claude-3-opus-20240229": { maxInputTokens: 2e5, maxOutputTokens: 4096, format: "anthropic", flavor: "chat", multimodal: !0, trainingData: "Aug 2023", inputCostPerToken: 15e-6, outputCostPerToken: 75e-6, displayName: "Claude 3 Opus", description: "Powerful model for complex tasks" },
      "claude-3-haiku-20240307": { maxInputTokens: 2e5, maxOutputTokens: 4096, format: "anthropic", flavor: "chat", multimodal: !0, trainingData: "Aug 2023", inputCostPerToken: 25e-8, outputCostPerToken: 125e-8, displayName: "Claude 3 Haiku", description: "Fastest, most cost-effective model" },
      "anthropic.claude-3-sonnet-20240229-v1:0": { maxInputTokens: 2e5, maxOutputTokens: 4096, format: "anthropic", flavor: "chat", multimodal: !0, trainingData: "Aug 2023", inputCostPerToken: 3e-6, outputCostPerToken: 15e-6, displayName: "Claude 3 Sonnet Bedrock", description: "Balance of intelligence and speed" },
      "anthropic.claude-3-opus-20240229-v1:0": { maxInputTokens: 2e5, maxOutputTokens: 4096, format: "anthropic", flavor: "chat", multimodal: !0, trainingData: "Aug 2023", inputCostPerToken: 15e-6, outputCostPerToken: 75e-6, displayName: "Claude 3 Opus Bedrock", description: "Powerful model for complex tasks" },
      "anthropic.claude-3-haiku-20240307-v1:0": { maxInputTokens: 2e5, maxOutputTokens: 4096, format: "anthropic", flavor: "chat", multimodal: !0, trainingData: "Aug 2023", inputCostPerToken: 25e-8, outputCostPerToken: 125e-8, displayName: "Claude 3 Haiku Bedrock", description: "Fastest, most cost-effective model" },
      "gemini-1.5-pro": { maxInputTokens: 2097152, maxOutputTokens: 8192, format: "google", flavor: "chat", multimodal: !0, trainingData: "November 2023", inputCostPerToken: 7e-6, outputCostPerToken: 21e-6, displayName: "Gemini 1.5 Pro Latest", description: "Complex reasoning tasks such as code and text generation, text editing, problem solving, data extraction and generation" },
      "gemini-1.5-pro-exp-0801": { maxInputTokens: 2097152, maxOutputTokens: 8192, format: "google", flavor: "chat", multimodal: !0, trainingData: "November 2023", inputCostPerToken: 7e-6, outputCostPerToken: 21e-6, displayName: "Gemini 1.5 Pro Experimental 0801", description: "Experimental model for testing new features" },
      "gemini-1.5-flash": { maxInputTokens: 1048576, maxOutputTokens: 8192, format: "google", flavor: "chat", multimodal: !0, trainingData: "November 2023", inputCostPerToken: 7e-7, outputCostPerToken: 21e-7, displayName: "Gemini 1.5 Flash Latest", description: "Fast and versatile performance across a diverse variety of tasks" },
      "gemini-1.0-pro": { maxInputTokens: 32e3, maxOutputTokens: 4096, format: "google", flavor: "chat", multimodal: !1, trainingData: "November 2023", inputCostPerToken: 5e-7, outputCostPerToken: 15e-7, displayName: "Gemini 1.0 Pro", description: "Natural language tasks, multi-turn text and code chat, and code generation" },
      "open-mixtral-8x22b": { maxInputTokens: 64e3, maxOutputTokens: 64e3, format: "openai", flavor: "chat", multimodal: !1, trainingData: void 0, inputCostPerToken: 2e-6, outputCostPerToken: 6e-6, displayName: "Mixtral 8x22B", description: "A bigger sparse mixture of experts model. As such, it leverages up to 141B parameters but only uses about 39B during inference, leading to better inference throughput at the cost of more vRAM. Learn more on the dedicated blog post" },
      "open-mixtral-8x22b-2404": { maxInputTokens: 64e3, maxOutputTokens: 64e3, format: "openai", flavor: "chat", multimodal: !1, trainingData: void 0, inputCostPerToken: 2e-6, outputCostPerToken: 6e-6, displayName: "Mixtral 8x22B 2404", description: "A bigger sparse mixture of experts model. As such, it leverages up to 141B parameters but only uses about 39B during inference, leading to better inference throughput at the cost of more vRAM. Learn more on the dedicated blog post" },
      "mistral-small-latest": { maxInputTokens: 32e3, maxOutputTokens: 32e3, format: "openai", flavor: "chat", multimodal: !1, trainingData: void 0, inputCostPerToken: 1e-6, outputCostPerToken: 3e-6, displayName: "Mistral Small", description: "Suitable for simple tasks that one can do in bulk (Classification, Customer Support, or Text Generation)" },
      "mistral-small-2402": { maxInputTokens: 32e3, maxOutputTokens: 32e3, format: "openai", flavor: "chat", multimodal: !1, trainingData: void 0, inputCostPerToken: 1e-6, outputCostPerToken: 3e-6, displayName: "Mistral Small 2402", description: "Suitable for simple tasks that one can do in bulk (Classification, Customer Support, or Text Generation)" },
      "mistral-medium-latest": { maxInputTokens: 32e3, maxOutputTokens: 32e3, format: "openai", flavor: "chat", multimodal: !1, trainingData: void 0, inputCostPerToken: 27e-7, outputCostPerToken: 81e-7, displayName: "Mistral Medium", description: "Ideal for intermediate tasks that require moderate reasoning (Data extraction, Summarizing a Document, Writing emails, Writing a Job Description, or Writing Product Descriptions)" },
      "mistral-medium-2312": { maxInputTokens: 32e3, maxOutputTokens: 32e3, format: "openai", flavor: "chat", multimodal: !1, trainingData: void 0, inputCostPerToken: 27e-7, outputCostPerToken: 81e-7, displayName: "Mistral Medium 2312", description: "Ideal for intermediate tasks that require moderate reasoning (Data extraction, Summarizing a Document, Writing emails, Writing a Job Description, or Writing Product Descriptions)" },
      "mistral-large-latest": { maxInputTokens: 32e3, maxOutputTokens: 32e3, format: "openai", flavor: "chat", multimodal: !1, trainingData: void 0, inputCostPerToken: 4e-6, outputCostPerToken: 12e-6, displayName: "Mistral Large", description: "Our flagship model that's ideal for complex tasks that require large reasoning capabilities or are highly specialized (Synthetic Text Generation, Code Generation, RAG, or Agents). Learn more on our blog post" },
      "mistral-large-2402": { maxInputTokens: 32e3, maxOutputTokens: 32e3, format: "openai", flavor: "chat", multimodal: !1, trainingData: void 0, inputCostPerToken: 4e-6, outputCostPerToken: 12e-6, displayName: "Mistral Large 2402", description: "Our flagship model that's ideal for complex tasks that require large reasoning capabilities or are highly specialized (Synthetic Text Generation, Code Generation, RAG, or Agents). Learn more on our blog post" },
      "codestral-latest": { maxInputTokens: 32e3, maxOutputTokens: 32e3, format: "openai", flavor: "chat", multimodal: !1, trainingData: void 0, inputCostPerToken: 1e-6, outputCostPerToken: 3e-6, displayName: "Codestral", description: "A cutting-edge generative model that has been specifically designed and optimized for code generation tasks, including fill-in-the-middle and code completion" },
      // NOTE(review): displayName "Codestral 2402" on key codestral-2405 looks like a typo — confirm upstream; kept byte-identical here.
      "codestral-2405": { maxInputTokens: 32e3, maxOutputTokens: 32e3, format: "openai", flavor: "chat", multimodal: !1, trainingData: void 0, inputCostPerToken: 1e-6, outputCostPerToken: 3e-6, displayName: "Codestral 2402", description: "A cutting-edge generative model that has been specifically designed and optimized for code generation tasks, including fill-in-the-middle and code completion" },
      "command-r-plus": { maxInputTokens: 128e3, maxOutputTokens: 4e3, format: "cohere", flavor: "chat", multimodal: !1, trainingData: void 0, inputCostPerToken: 3e-6, outputCostPerToken: 15e-6, displayName: "Command R+", description: "Command R+, our most powerful, scalable large language model (LLM) purpose-built to excel at real-world enterprise use cases." },
      "command-r": { maxInputTokens: 128e3, maxOutputTokens: 4e3, format: "cohere", flavor: "chat", multimodal: !1, trainingData: void 0, inputCostPerToken: 5e-7, outputCostPerToken: 15e-7, displayName: "Command R", description: "Command R is a generative model optimized for long context tasks such as retrieval-augmented generation (RAG) and using external APIs and tools." },
      "command-light": { maxInputTokens: 4e3, maxOutputTokens: 4e3, format: "cohere", flavor: "chat", multimodal: !1, trainingData: void 0, inputCostPerToken: 3e-7, outputCostPerToken: 6e-7, displayName: "Command Light", description: "A smaller, faster version of command. Almost as capable, but a lot faster." },
      "llama3-8b-8192": { maxInputTokens: 8192, maxOutputTokens: 4096, format: "openai", flavor: "chat", multimodal: !1, trainingData: void 0, inputCostPerToken: 1e-7, outputCostPerToken: 1e-7, displayName: "LLaMA 3 8b 8192", description: void 0 },
      "llama3-70b-8192": { maxInputTokens: 8192, maxOutputTokens: 4096, format: "openai", flavor: "chat", multimodal: !1, trainingData: void 0, inputCostPerToken: 64e-8, outputCostPerToken: 8e-7, displayName: "LLaMA 3 70b 8192", description: void 0 }
    };

    // Map a model id to its provider-family key; throws on unknown ids.
    const el = e => {
      if (ee(e)) return "openai";
      if (K(e) || $(e)) return "anthropic";
      if (eo(e)) return "google";
      if (ea(e)) return "mistral";
      if (en(e)) return "cohere";
      if (ei(e)) return "groqLlamma";
      throw Error("Invalid model");
    };

    // QA: the tool-choice values available for a provider family.
    const eu = e => {
      if ("anthropic" === e) return Object.values(h);
      if ("openai" === e) return Object.values(P);
      if ("google" === e) return Object.values(x);
      if ("mistral" === e) return Object.values(b);
      if ("cohere" === e) return Object.values(v);
      if ("groqLlamma" === e) return Object.values(C);
      throw Error("Invalid family model");
    };

    // Per-provider tool-call schemas.
    const es = j.z.object({ name: j.z.string(), arguments: j.z.string() });
    const em = j.z.object({ id: j.z.string(), function: es, type: j.z.literal("function") });          // OpenAI
    const ep = j.z.object({ id: j.z.string(), type: j.z.literal("tool_use"), name: j.z.string(), input: j.z.record(j.z.unknown()) }); // Anthropic
    const ed = j.z.object({ name: j.z.string(), args: j.z.record(j.z.unknown()) });                    // Google
    const ec = j.z.object({ id: j.z.string(), function: es });                                          // Mistral
    const eg = j.z.object({ generation_id: j.z.string().optional(), name: j.z.string().describe("Name of the tool to call"), parameters: j.z.record(j.z.unknown()).describe("The name and value of the parameters to use when invoking a tool") }); // Cohere
    const eT = j.z.object({ type: j.z.literal("openai"), toolsCalls: j.z.array(em) });
    const ek = j.z.object({ type: j.z.literal("anthropic"), toolsCalls: j.z.array(ep) });
    const ef = j.z.object({ type: j.z.literal("google"), toolsCalls: j.z.array(ed) });
    const eh = j.z.object({ type: j.z.literal("mistral"), toolsCalls: j.z.array(ec) });
    const eP = j.z.object({ type: j.z.literal("cohere"), toolsCalls: j.z.array(eg) });
    // Constructed-and-discarded schemas preserved from the original build
    // (their results were unused there too — presumably tree-shaking residue).
    j.z.object({ text: j.z.string().optional(), toolsCalls: eT.or(ek).or(ef).or(eh).or(eP).optional(), error: j.z.nullable(j.z.string()) });

    const ex = j.z.object({ id: j.z.string(), name: j.z.string(), slug: j.z.string(), description: j.z.string().nullable(), promptType: j.z.nativeEnum(S.PromptType), labels: j.z.record(j.z.string()), updatedAt: j.z.date() });
    const eb = j.z.object({ id: j.z.string(), version: j.z.string(), content: j.z.record(j.z.unknown()), createdAt: j.z.date() });
    ex.extend({ promptVersions: j.z.array(eb) });
    j.z.object({ languageModel: j.z.nativeEnum(I), promptSettings: J, chatMessages: j.z.array(L), tools: j.z.string().nullable() });
    j.z.object({ text: j.z.string() });

    // tR: default prompt settings.
    const ev = { temperature: 0, maxTokens: void 0, topP: 1, frequencyPenalty: void 0, presencePenalty: void 0, toolChoice: { modelFamily: "anthropic", choice: "auto" } };
    // vw: default chat transcript.
    const eC = [
      { role: "system", content: "You are a helpful assistant." },
      { role: "user", content: "Hello, how are you?" }
    ];
    // yZ: default prompt state.
    const ey = { languageModel: "claude-3-5-sonnet-latest", promptSettings: ev, chatMessages: eC, tools: null };

    // FM: normalize a provider-specific tool-calls payload into a plain array.
    // Returns [] when no tool calls are present, and (preserved from the
    // original) undefined for an unrecognized payload type.
    function eO(e) {
      if (!e.toolsCalls) return [];
      let { type: t, toolsCalls: o } = e.toolsCalls;
      if ("openai" === t || "mistral" === t)
        // OpenAI/Mistral carry JSON-encoded argument strings — decode them.
        return o.map(e => ({ function: { name: e.function.name, arguments: JSON.parse(e.function.arguments) } }));
      if ("anthropic" === t) return o.map(e => ({ name: e.name, input: e.input }));
      if ("google" === t) return o.map(e => ({ name: e.name, args: e.args }));
      if ("cohere" === t) return o.map(e => ({ name: e.name, parameters: e.parameters }));
      return void 0;
    }

    // Tool-choice translation table: ez[sourceFamily][targetFamily][choice]
    // gives the closest equivalent choice in the target family.
    const ez = {
      openai: { openai: { none: "none", auto: "auto", function: "function", required: "required" }, anthropic: { none: "auto", auto: "auto", function: "tool", required: "any" }, google: { none: "none", auto: "auto", function: "any", required: "any" }, mistral: { none: "none", auto: "auto", function: "any", required: "any" }, cohere: { none: "auto", auto: "auto", function: "auto", required: "auto" }, groqLlamma: { none: "none", auto: "none", function: "none", required: "none" } },
      anthropic: { anthropic: { any: "auto", auto: "auto", tool: "tool" }, openai: { any: "required", auto: "auto", tool: "function" }, google: { any: "auto", auto: "auto", tool: "any" }, mistral: { any: "auto", auto: "auto", tool: "any" }, cohere: { any: "auto", auto: "auto", tool: "auto" }, groqLlamma: { any: "none", auto: "none", tool: "none" } },
      google: { google: { auto: "auto", any: "any", none: "none" }, openai: { auto: "auto", any: "required", none: "none" }, anthropic: { auto: "auto", any: "tool", none: "auto" }, mistral: { auto: "auto", any: "any", none: "none" }, cohere: { auto: "auto", any: "auto", none: "auto" }, groqLlamma: { auto: "none", any: "none", none: "none" } },
      mistral: { mistral: { auto: "auto", any: "any", none: "none" }, openai: { auto: "auto", any: "required", none: "none" }, anthropic: { auto: "auto", any: "tool", none: "any" }, google: { auto: "auto", any: "any", none: "none" }, cohere: { auto: "auto", any: "auto", none: "auto" }, groqLlamma: { auto: "none", any: "none", none: "none" } },
      cohere: { cohere: { auto: "auto" }, openai: { auto: "auto" }, anthropic: { auto: "auto" }, google: { auto: "auto" }, mistral: { auto: "auto" }, groqLlamma: { auto: "none" } },
      groqLlamma: { groqLlamma: { none: "none" }, openai: { none: "auto" }, anthropic: { none: "auto" }, google: { none: "auto" }, mistral: { none: "auto" }, cohere: { none: "auto" } }
    };

    // G7: translate a toolChoice object when switching model `t` -> model `o`.
    // Returns the input unchanged when both models share a family.
    function eG(e, t, o) {
      let a = el(t);
      let n = el(o);
      if (a === n) return e;
      let i = ez[a][n][e.choice];
      return { modelFamily: n, choice: i, functionName: e.functionName };
    }

    // u0 / mf: environment labels.
    const eN = "production";
    const eM = "staging";
  }
}]);