diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index b2b983e3a..163e46b04 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,6 +6,8 @@ repos:
       - id: check-json
       - id: check-merge-conflict
       - id: end-of-file-fixer
+        exclude: tools/compogen/cmd/testdata
+        exclude_types: [svg,mdx]
      - id: trailing-whitespace
      - id: pretty-format-json
        args: [--autofix, --no-sort-keys]
diff --git a/ai/anthropic/v0/README.mdx b/ai/anthropic/v0/README.mdx
index 8f0fd3d9b..96ec0f001 100644
--- a/ai/anthropic/v0/README.mdx
+++ b/ai/anthropic/v0/README.mdx
@@ -48,7 +48,6 @@ Provide text outputs in response to text inputs.
 | Model Name (required) | `model-name` | string | The Anthropic model to be used. |
 | Prompt (required) | `prompt` | string | The prompt text |
 | System message | `system-message` | string | The system message helps set the behavior of the assistant. For example, you can modify the personality of the assistant or provide specific instructions about how it should behave throughout the conversation. By default, the model’s behavior is using a generic message as "You are a helpful assistant." |
-| Extra Parameters | `extra-params` | object | Extra Parameters |
 | Prompt Images | `prompt-images` | array[string] | The prompt images (Note: The prompt images will be injected in the order they are provided to the 'prompt' message. Anthropic doesn't support sending images via image-url, use this field instead) |
 | Chat history | `chat-history` | array[object] | Incorporate external chat history, specifically previous messages within the conversation. Please note that System Message will be ignored and will not have any effect when this field is populated. Each message should adhere to the format: : \{"role": "The message role, i.e. 'system', 'user' or 'assistant'", "content": "message content"\}. |
 | Seed | `seed` | integer | The seed (Note: Not supported by Anthropic Models) |
@@ -61,3 +60,4 @@ Provide text outputs in response to text inputs.
 | Output | ID | Type | Description |
 | :--- | :--- | :--- | :--- |
 | Text | `text` | string | Model Output |
+| Usage (optional) | `usage` | object | Usage tokens in Anthropic |
diff --git a/ai/anthropic/v0/component_test.go b/ai/anthropic/v0/component_test.go
index 80d6703c1..457be7761 100644
--- a/ai/anthropic/v0/component_test.go
+++ b/ai/anthropic/v0/component_test.go
@@ -42,7 +42,7 @@ func TestComponent_Execute(t *testing.T) {
 	}{
 		{
 			name:        "text generation",
-			task:        textGenerationTask,
+			task:        TextGenerationTask,
 			path:        messagesPath,
 			contentType: httpclient.MIMETypeJSON,
 		},
@@ -191,9 +191,14 @@ func TestComponent_Generation(t *testing.T) {
 		input    map[string]any
 		wantResp messagesOutput
 	}{
-
-		input:    map[string]any{"prompt": "Hi! What's your name?", "chat-history": mockHistory},
-		wantResp: messagesOutput{Text: "Hi! My name is Claude. (messageCount: 3)"},
+		input: map[string]any{"prompt": "Hi! What's your name?", "chat-history": mockHistory},
+		wantResp: messagesOutput{
+			Text: "Hi! My name is Claude. (messageCount: 3)",
(messageCount: 3)", + Usage: messagesUsage{ + InputTokens: 10, + OutputTokens: 25, + }, + }, } c.Run("ok - generation", func(c *qt.C) { @@ -203,7 +208,7 @@ func TestComponent_Generation(t *testing.T) { c.Assert(err, qt.IsNil) e := &execution{ - ComponentExecution: base.ComponentExecution{Component: connector, SystemVariables: nil, Setup: setup, Task: textGenerationTask}, + ComponentExecution: base.ComponentExecution{Component: connector, SystemVariables: nil, Setup: setup, Task: TextGenerationTask}, client: &MockAnthropicClient{}, } e.execute = e.generateText diff --git a/ai/anthropic/v0/config/definition.json b/ai/anthropic/v0/config/definition.json index 97f4d0a5d..1a9a8afc0 100644 --- a/ai/anthropic/v0/config/definition.json +++ b/ai/anthropic/v0/config/definition.json @@ -17,4 +17,3 @@ "sourceUrl": "https://github.com/instill-ai/component/blob/main/ai/anthropic/v0", "releaseStage": "RELEASE_STAGE_ALPHA" } - diff --git a/ai/anthropic/v0/config/setup.json b/ai/anthropic/v0/config/setup.json index 52bc2bdb8..8ed1482fd 100644 --- a/ai/anthropic/v0/config/setup.json +++ b/ai/anthropic/v0/config/setup.json @@ -1,29 +1,28 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "additionalProperties": true, - "properties": { - "api-key": { - "description": "Fill your Anthropic API key. To find your keys, visit the Anthropic console page.", - "instillUpstreamTypes": [ - "reference" - ], - "instillAcceptFormats": [ - "string" - ], - "instillSecret": true, - "instillCredential": false, - "instillUIOrder": 0, - "title": "API Key", - "type": "string" - } - }, - "required": [ - "api-key" - ], - "instillEditOnNodeFields": [ - "api-key" - ], - "title": "Anthropic Connection", - "type": "object" - } - \ No newline at end of file + "$schema": "http://json-schema.org/draft-07/schema#", + "additionalProperties": true, + "properties": { + "api-key": { + "description": "Fill your Anthropic API key. 
+      "instillUpstreamTypes": [
+        "reference"
+      ],
+      "instillAcceptFormats": [
+        "string"
+      ],
+      "instillSecret": true,
+      "instillCredential": true,
+      "instillUIOrder": 0,
+      "title": "API Key",
+      "type": "string"
+    }
+  },
+  "required": [
+    "api-key"
+  ],
+  "instillEditOnNodeFields": [
+    "api-key"
+  ],
+  "title": "Anthropic Connection",
+  "type": "object"
+}
diff --git a/ai/anthropic/v0/config/tasks.json b/ai/anthropic/v0/config/tasks.json
index 6853693a0..8477216bb 100644
--- a/ai/anthropic/v0/config/tasks.json
+++ b/ai/anthropic/v0/config/tasks.json
@@ -25,7 +25,7 @@
           "description": "The type of the content part.",
           "enum": [
             "text",
-              "image_url"
+            "image_url"
           ],
           "instillFormat": "string",
           "type": "string"
@@ -61,59 +61,30 @@
       "title": "Chat Message",
       "type": "object"
     },
-    "common": {
-      "description": "Input",
-      "instillEditOnNodeFields": [
-        "image-base64",
-        "model-name"
-      ],
-      "instillUIOrder": 0,
+    "usage": {
+      "description": "Usage tokens in Anthropic",
+      "instillUIOrder": 1,
       "properties": {
-        "image-base64": {
-          "description": "Image base64",
-          "instillAcceptFormats": [
-            "image/*"
-          ],
+        "input-tokens": {
+          "description": "The input tokens used by Anthropic",
+          "instillFormat": "number",
           "instillUIOrder": 2,
-          "instillUpstreamTypes": [
-            "reference"
-          ],
-          "title": "Image",
-          "type": "string"
+          "title": "Input Tokens",
+          "type": "number"
         },
-        "model-name": {
-          "description": "The Instill Model model to be used.",
-          "instillAcceptFormats": [
-            "string"
-          ],
-          "instillUIOrder": 0,
-          "instillUpstreamTypes": [
-            "value",
-            "reference",
-            "template"
-          ],
-          "title": "Model Name",
-          "type": "string"
+        "output-tokens": {
+          "description": "The output tokens used by Anthropic",
+          "instillFormat": "number",
+          "instillUIOrder": 3,
+          "title": "Output Tokens",
+          "type": "number"
         }
       },
       "required": [
-        "image-base64",
-        "model-name"
+        "input-tokens",
+        "output-tokens"
       ],
-      "title": "Input",
-      "type": "object"
-    },
-    "extra-params": {
-      "description": "Extra Parameters",
-      "instillAcceptFormats": [
-        "semi-structured/object"
-      ],
-      "instillUIOrder": 3,
-      "instillUpstreamTypes": [
-        "reference"
-      ],
-      "required": [],
-      "title": "Extra Parameters",
+      "title": "Usage",
       "type": "object"
     }
   },
@@ -144,9 +115,6 @@
           "title": "Chat history",
           "type": "array"
         },
-        "extra-params": {
-          "$ref": "#/$defs/extra-params"
-        },
        "max-new-tokens": {
          "default": 50,
          "description": "The maximum number of tokens for model to generate",
@@ -179,6 +147,17 @@
             "reference",
             "template"
           ],
+          "instillCredentialMap": {
+            "values": [
+              "claude-3-5-sonnet-20240620",
+              "claude-3-opus-20240229",
+              "claude-3-sonnet-20240229",
+              "claude-3-haiku-20240307"
+            ],
+            "targets": [
+              "setup.api-key"
+            ]
+          },
          "title": "Model Name",
          "type": "string"
        },
@@ -289,6 +268,9 @@
           "instillUIMultiline": true,
           "title": "Text",
           "type": "string"
+        },
+        "usage": {
+          "$ref": "#/$defs/usage"
         }
       },
       "required": [
@@ -298,4 +280,4 @@
     "type": "object"
   }
 }
-}
\ No newline at end of file
+}
diff --git a/ai/anthropic/v0/main.go b/ai/anthropic/v0/main.go
index d7ce4ed48..0ddf28711 100644
--- a/ai/anthropic/v0/main.go
+++ b/ai/anthropic/v0/main.go
@@ -16,7 +16,7 @@ import (
 )

 const (
-	textGenerationTask = "TASK_TEXT_GENERATION_CHAT"
+	TextGenerationTask = "TASK_TEXT_GENERATION_CHAT"
 	cfgAPIKey          = "api-key"
 	host               = "https://api.anthropic.com"
 	messagesPath       = "/v1/messages"
@@ -38,6 +38,9 @@ var (

 type component struct {
 	base.Component
+
+	usageHandlerCreator base.UsageHandlerCreator
+	secretAPIKey        string
 }

 type AnthropicClient interface {
@@ -70,7 +73,13 @@ type messagesReq struct {
 }

 type messagesOutput struct {
-	Text string `json:"text"`
+	Text  string        `json:"text"`
+	Usage messagesUsage `json:"usage"`
+}
+
+type messagesUsage struct {
+	InputTokens  int `json:"input-tokens"`
+	OutputTokens int `json:"output-tokens"`
 }

 type message struct {
@@ -109,17 +118,47 @@ func Init(bc base.Component) *component {

 type execution struct {
 	base.ComponentExecution
-	execute func(*structpb.Struct) (*structpb.Struct, error)
-	client  AnthropicClient
+
+	execute    func(*structpb.Struct) (*structpb.Struct, error)
+	client     AnthropicClient
+	usesSecret bool
+}
+
+// WithSecrets loads secrets into the connector, which can be used to configure
+// it with globally defined parameters.
+func (c *component) WithSecrets(s map[string]any) *component {
+	c.secretAPIKey = base.ReadFromSecrets(cfgAPIKey, s)
+	return c
+}
+
+// WithUsageHandlerCreator overrides the UsageHandlerCreator method.
+func (c *component) WithUsageHandlerCreator(newUH base.UsageHandlerCreator) *component {
+	c.usageHandlerCreator = newUH
+	return c
+}
+
+// UsageHandlerCreator returns a function to initialize a UsageHandler.
+func (c *component) UsageHandlerCreator() base.UsageHandlerCreator {
+	if c.usageHandlerCreator == nil {
+		return c.Component.UsageHandlerCreator()
+	}
+	return c.usageHandlerCreator
 }

 func (c *component) CreateExecution(sysVars map[string]any, setup *structpb.Struct, task string) (*base.ExecutionWrapper, error) {
+
+	resolvedSetup, resolved, err := c.resolveSecrets(setup)
+	if err != nil {
+		return nil, err
+	}
+
 	e := &execution{
 		ComponentExecution: base.ComponentExecution{Component: c, SystemVariables: sysVars, Task: task, Setup: setup},
-		client:             newClient(getAPIKey(setup), getBasePath(setup), c.GetLogger()),
+		client:             newClient(getAPIKey(resolvedSetup), getBasePath(resolvedSetup), c.GetLogger()),
+		usesSecret:         resolved,
 	}
 	switch task {
-	case textGenerationTask:
+	case TextGenerationTask:
 		e.execute = e.generateText
 	default:
 		return nil, fmt.Errorf("unsupported task")
@@ -127,6 +166,27 @@ func (c *component) CreateExecution(sysVars map[string]any, setup *structpb.Stru
 	return &base.ExecutionWrapper{Execution: e}, nil
 }

+// resolveSecrets looks for references to a global secret in the setup
+// and replaces them by the global secret injected during initialization.
+func (c *component) resolveSecrets(conn *structpb.Struct) (*structpb.Struct, bool, error) {
+
+	apiKey := conn.GetFields()[cfgAPIKey].GetStringValue()
+	if apiKey != base.SecretKeyword {
+		return conn, false, nil
+	}
+
+	if c.secretAPIKey == "" {
+		return nil, false, base.NewUnresolvedSecret(cfgAPIKey)
+	}
+
+	conn.GetFields()[cfgAPIKey] = structpb.NewStringValue(c.secretAPIKey)
+	return conn, true, nil
+}
+
+func (e *execution) UsesSecret() bool {
+	return e.usesSecret
+}
+
 func (e *execution) Execute(_ context.Context, inputs []*structpb.Struct) ([]*structpb.Struct, error) {
 	outputs := make([]*structpb.Struct, len(inputs))

@@ -214,6 +274,10 @@ func (e *execution) generateText(in *structpb.Struct) (*structpb.Struct, error)

 	outputStruct := messagesOutput{
 		Text: "",
+		Usage: messagesUsage{
+			InputTokens:  resp.Usage.InputTokens,
+			OutputTokens: resp.Usage.OutputTokens,
+		},
 	}
 	for _, c := range resp.Content {
 		outputStruct.Text += c.Text
diff --git a/ai/instill/v0/README.mdx b/ai/instill/v0/README.mdx
index c455003a3..737eeb2aa 100644
--- a/ai/instill/v0/README.mdx
+++ b/ai/instill/v0/README.mdx
@@ -182,7 +182,6 @@ Generate texts from input text prompts.
 | Model Name (required) | `model-name` | string | The Instill Model model to be used. |
 | Prompt (required) | `prompt` | string | The prompt text |
 | System message | `system-message` | string | The system message helps set the behavior of the assistant. For example, you can modify the personality of the assistant or provide specific instructions about how it should behave throughout the conversation. By default, the model’s behavior is using a generic message as "You are a helpful assistant." |
-| Extra Parameters | `extra-params` | object | Extra Parameters |
 | Prompt Images | `prompt-images` | array[string] | The prompt images |
 | Chat history | `chat-history` | array[object] | Incorporate external chat history, specifically previous messages within the conversation. Please note that System Message will be ignored and will not have any effect when this field is populated. Each message should adhere to the format: : \{"role": "The message role, i.e. 'system', 'user' or 'assistant'", "content": "message content"\}. |
 | Seed | `seed` | integer | The seed |
@@ -212,7 +211,6 @@ Generate texts from input text prompts and chat history.
 | Model Name (required) | `model-name` | string | The Instill Model model to be used. |
 | Prompt (required) | `prompt` | string | The prompt text |
 | System message | `system-message` | string | The system message helps set the behavior of the assistant. For example, you can modify the personality of the assistant or provide specific instructions about how it should behave throughout the conversation. By default, the model’s behavior is using a generic message as "You are a helpful assistant." |
-| Extra Parameters | `extra-params` | object | Extra Parameters |
 | Prompt Images | `prompt-images` | array[string] | The prompt images |
 | Chat history | `chat-history` | array[object] | Incorporate external chat history, specifically previous messages within the conversation. Please note that System Message will be ignored and will not have any effect when this field is populated. Each message should adhere to the format: : \{"role": "The message role, i.e. 'system', 'user' or 'assistant'", "content": "message content"\}. |
 | Seed | `seed` | integer | The seed |
@@ -241,7 +239,6 @@ Generate images from input text prompts.
 | Task ID (required) | `task` | string | `TASK_TEXT_TO_IMAGE` |
 | Model Name (required) | `model-name` | string | The Instill Model model to be used. |
 | Prompt (required) | `prompt` | string | The prompt text |
-| Extra Parameters | `extra-params` | object | Extra Parameters |
 | CFG Scale | `cfg-scale` | number | The guidance scale, default is 7.5 |
 | Samples | `samples` | integer | The number of generated samples, default is 1 |
 | Seed | `seed` | integer | The seed, default is 0 |
@@ -269,7 +266,6 @@ Answer questions based on a prompt and an image.
 | Model Name (required) | `model-name` | string | The Instill Model model to be used. |
 | Prompt (required) | `prompt` | string | The prompt text |
 | System message | `system-message` | string | The system message helps set the behavior of the assistant. For example, you can modify the personality of the assistant or provide specific instructions about how it should behave throughout the conversation. By default, the model’s behavior is using a generic message as "You are a helpful assistant." |
-| Extra Parameters | `extra-params` | object | Extra Parameters |
 | Prompt Images (required) | `prompt-images` | array[string] | The prompt images |
 | Chat history | `chat-history` | array[object] | Incorporate external chat history, specifically previous messages within the conversation. Please note that System Message will be ignored and will not have any effect when this field is populated. Each message should adhere to the format: : \{"role": "The message role, i.e. 'system', 'user' or 'assistant'", "content": "message content"\}. |
 | Seed | `seed` | integer | The seed |
@@ -298,7 +294,6 @@ Generate image from input text prompt and image.
 | Task ID (required) | `task` | string | `TASK_IMAGE_TO_IMAGE` |
 | Model Name (required) | `model-name` | string | The Instill Model model to be used. |
 | Prompt (required) | `prompt` | string | The prompt text |
-| Extra Parameters | `extra-params` | object | Extra Parameters |
 | Prompt Image (required) | `image-base64` | string | The prompt image |
 | CFG Scale | `cfg-scale` | number | The guidance scale, default is 7.5 |
 | Seed | `seed` | integer | The seed |
diff --git a/ai/instill/v0/config/tasks.json b/ai/instill/v0/config/tasks.json
index 639a7ef1a..9c19c25ec 100644
--- a/ai/instill/v0/config/tasks.json
+++ b/ai/instill/v0/config/tasks.json
@@ -64,19 +64,6 @@
       ],
       "title": "Input",
       "type": "object"
-    },
-    "extra-params": {
-      "description": "Extra Parameters",
-      "instillAcceptFormats": [
-        "semi-structured/object"
-      ],
-      "instillUIOrder": 3,
-      "instillUpstreamTypes": [
-        "reference"
-      ],
-      "required": [],
-      "title": "Extra Parameters",
-      "type": "object"
     }
   },
   "TASK_CLASSIFICATION": {
@@ -132,9 +119,6 @@
           "title": "CFG Scale",
           "type": "number"
         },
-        "extra-params": {
-          "$ref": "#/$defs/extra-params"
-        },
        "image-base64": {
          "description": "The prompt image",
          "instillAcceptFormats": [
@@ -334,9 +318,6 @@
           "title": "Chat history",
           "type": "array"
         },
-        "extra-params": {
-          "$ref": "#/$defs/extra-params"
-        },
        "max-new-tokens": {
          "default": 50,
          "description": "The maximum number of tokens for model to generate",
@@ -512,9 +493,6 @@
           "title": "CFG Scale",
           "type": "number"
         },
-        "extra-params": {
-          "$ref": "#/$defs/extra-params"
-        },
        "model-name": {
          "description": "The Instill Model model to be used.",
          "instillAcceptFormats": [
@@ -645,9 +623,6 @@
           "title": "Chat history",
           "type": "array"
         },
-        "extra-params": {
-          "$ref": "#/$defs/extra-params"
-        },
        "max-new-tokens": {
          "default": 50,
          "description": "The maximum number of tokens for model to generate",
diff --git a/ai/instill/v0/image_to_image.go b/ai/instill/v0/image_to_image.go
index 7ddf83cae..dd291eb4b 100644
--- a/ai/instill/v0/image_to_image.go
+++ b/ai/instill/v0/image_to_image.go
@@ -49,10 +49,6 @@ func (e *execution) executeImageToImage(grpcClient modelPB.ModelPublicServiceCli
 		v := int32(input.GetFields()["seed"].GetNumberValue())
 		imageToImageInput.Seed = &v
 	}
-	if _, ok := input.GetFields()["extra-params"]; ok {
-		v := input.GetFields()["extra-params"].GetStructValue()
-		imageToImageInput.ExtraParams = v
-	}

 	taskInput := &modelPB.TaskInput_ImageToImage{
 		ImageToImage: imageToImageInput,
diff --git a/ai/instill/v0/llm_utils.go b/ai/instill/v0/llm_utils.go
index 4d3ebbcdd..dce994c77 100644
--- a/ai/instill/v0/llm_utils.go
+++ b/ai/instill/v0/llm_utils.go
@@ -122,10 +122,6 @@ func (e *execution) convertLLMInput(input *structpb.Struct) *LLMInput {
 		v := int32(input.GetFields()["seed"].GetNumberValue())
 		llmInput.Seed = &v
 	}
-	if _, ok := input.GetFields()["extra-params"]; ok {
-		v := input.GetFields()["extra-params"].GetStructValue()
-		llmInput.ExtraParams = v
-	}

 	return llmInput
 }
diff --git a/application/email/v0/README.mdx b/application/email/v0/README.mdx
index f5f276ad8..46c676bb1 100644
--- a/application/email/v0/README.mdx
+++ b/application/email/v0/README.mdx
@@ -150,3 +150,8 @@ For example, if you want to search for the email from `email@example.com`, you n
 | To | `to` | array[string] | The email address of the recipient |
 | Subject | `subject` | string | The subject of the email |
 | Message | `message` | string | The message of the email |
+
+
+
+
+
diff --git a/application/email/v0/config/setup.json b/application/email/v0/config/setup.json
index 505e83f91..fa5ac060f 100644
--- a/application/email/v0/config/setup.json
+++ b/application/email/v0/config/setup.json
@@ -68,6 +68,6 @@
     "server-address",
     "server-port"
   ],
-  "title": "Gmail",
+  "title": "Email",
   "type": "object"
 }
diff --git a/data/bigquery/v0/config/tasks.json b/data/bigquery/v0/config/tasks.json
index 9e3ebc36f..f760e4bd1 100644
--- a/data/bigquery/v0/config/tasks.json
+++ b/data/bigquery/v0/config/tasks.json
@@ -70,7 +70,9 @@
           }
         }
       },
-      "required": ["data"],
+      "required": [
+        "data"
+      ],
      "title": "Output",
      "type": "object"
    }
diff --git a/data/bigquery/v0/main_test.go b/data/bigquery/v0/main_test.go
index 8393e7f61..1206e50e2 100644
--- a/data/bigquery/v0/main_test.go
+++ b/data/bigquery/v0/main_test.go
@@ -1,3 +1,3 @@
 // TODO: chuang8511, add test code
 // It will be done before 2024-06-26.
-package bigquery
\ No newline at end of file
+package bigquery
diff --git a/data/bigquery/v0/read.go b/data/bigquery/v0/read.go
index c33117c18..a8bef6bfa 100644
--- a/data/bigquery/v0/read.go
+++ b/data/bigquery/v0/read.go
@@ -47,11 +47,11 @@ func readDataFromBigQuery(input ReadInput) (ReadOutput, error) {
 			break
 		}
 		data := map[string]any{}
-		
+
 		for i, schema := range it.Schema {
 			data[schema.Name] = values[i]
 		}
-		
+
 		result = append(result, data)
 	}

diff --git a/data/googlecloudstorage/v0/config/tasks.json b/data/googlecloudstorage/v0/config/tasks.json
index 1233984ba..52ce14a3f 100644
--- a/data/googlecloudstorage/v0/config/tasks.json
+++ b/data/googlecloudstorage/v0/config/tasks.json
@@ -1,4 +1,3 @@
-
 {
   "$defs": {
     "bucket-name": {
@@ -338,7 +337,7 @@
           "title": "Text Objects",
           "type": "array",
           "items": {
-            "properties":{
+            "properties": {
              "data": {
                "$ref": "#/$defs/data"
              },
@@ -346,7 +345,7 @@
                 "$ref": "#/$defs/attributes"
               }
             },
-            "required":[],
+            "required": [],
            "title": "Object",
            "type": "object"
          }
@@ -360,7 +359,7 @@
           "title": "Image Objects",
           "type": "array",
           "items": {
-            "properties":{
+            "properties": {
              "data": {
                "$ref": "#/$defs/data"
              },
@@ -368,7 +367,7 @@
                 "$ref": "#/$defs/attributes"
               }
             },
-            "required":[],
+            "required": [],
            "title": "Object",
            "type": "object"
          }
@@ -382,7 +381,7 @@
           "title": "Document Objects",
           "type": "array",
           "items": {
-            "properties":{
+            "properties": {
              "data": {
                "$ref": "#/$defs/data"
              },
@@ -390,7 +389,7 @@
                 "$ref": "#/$defs/attributes"
               }
             },
-            "required":[],
+            "required": [],
            "title": "Object",
            "type": "object"
          }
@@ -404,7 +403,7 @@
           "title": "Audio Objects",
           "type": "array",
           "items": {
-            "properties":{
+            "properties": {
              "data": {
                "$ref": "#/$defs/data"
              },
@@ -412,7 +411,7 @@
                 "$ref": "#/$defs/attributes"
               }
             },
-            "required":[],
+            "required": [],
            "title": "Object",
            "type": "object"
          }
@@ -426,7 +425,7 @@
           "title": "Video Objects",
           "type": "array",
           "items": {
-            "properties":{
+            "properties": {
              "data": {
                "$ref": "#/$defs/data"
              },
@@ -434,7 +433,7 @@
                 "$ref": "#/$defs/attributes"
               }
             },
-            "required":[],
+            "required": [],
"required": [], "title": "Object", "type": "object" } @@ -450,7 +449,7 @@ "input": { "instillUIOrder": 0, "properties": { - "bucket-name": { + "bucket-name": { "$ref": "#/$defs/bucket-name" }, "project-id": { diff --git a/data/googlecloudstorage/v0/main_test.go b/data/googlecloudstorage/v0/main_test.go index 62ee67749..7bf255ced 100644 --- a/data/googlecloudstorage/v0/main_test.go +++ b/data/googlecloudstorage/v0/main_test.go @@ -1,4 +1,4 @@ package googlecloudstorage // TODO: chuang8511: add test cases by mocking the GCS client -// It will be done in 2024-06-26 \ No newline at end of file +// It will be done in 2024-06-26 diff --git a/operator/document/v0/python/transformPDFToMarkdown.py b/operator/document/v0/python/transformPDFToMarkdown.py index b62fb882d..e3b2fc03c 100644 --- a/operator/document/v0/python/transformPDFToMarkdown.py +++ b/operator/document/v0/python/transformPDFToMarkdown.py @@ -18,7 +18,7 @@ def __init__(self, x, display_image_tag=False): self.images = [] if display_image_tag: self.process_image() - + for page in self.pages: page_lines = page.extract_text_lines() self.process_line(page_lines, page.page_number) diff --git a/store/store.go b/store/store.go index c21d3926e..3f177f738 100644 --- a/store/store.go +++ b/store/store.go @@ -100,7 +100,13 @@ func Init( compStore.Import(conn) } compStore.Import(cohere.Init(baseComp)) - compStore.Import(anthropic.Init(baseComp)) + { + // Anthropic + conn := anthropic.Init(baseComp) + conn = conn.WithSecrets(secrets[conn.GetID()]). + WithUsageHandlerCreator(usageHandlerCreators[conn.GetID()]) + compStore.Import(conn) + } compStore.Import(archetypeai.Init(baseComp)) compStore.Import(numbers.Init(baseComp)) compStore.Import(bigquery.Init(baseComp))