From 21d2574519664eb9df488524a5830f149880c360 Mon Sep 17 00:00:00 2001
From: "Ankush Pala ankush@lastmileai.dev" <>
Date: Mon, 29 Jan 2024 17:13:23 -0500
Subject: [PATCH] [editor] Claude Bedrock Prompt Schema

## Testplan

https://github.com/lastmile-ai/aiconfig/assets/141073967/29c1faa7-7d13-412f-8606-9ad556eb1c52
---
 .../ClaudeBedrockPromptSchema.ts              | 71 +++++++++++++++++++
 .../editor/client/src/utils/promptUtils.ts    |  3 +
 2 files changed, 74 insertions(+)
 create mode 100644 python/src/aiconfig/editor/client/src/shared/prompt_schemas/ClaudeBedrockPromptSchema.ts

diff --git a/python/src/aiconfig/editor/client/src/shared/prompt_schemas/ClaudeBedrockPromptSchema.ts b/python/src/aiconfig/editor/client/src/shared/prompt_schemas/ClaudeBedrockPromptSchema.ts
new file mode 100644
index 000000000..7d23e93a3
--- /dev/null
+++ b/python/src/aiconfig/editor/client/src/shared/prompt_schemas/ClaudeBedrockPromptSchema.ts
@@ -0,0 +1,71 @@
+import { PromptSchema } from "../../utils/promptUtils";
+
+export const ClaudeBedrockPromptSchema: PromptSchema = {
+  // See https://docs.anthropic.com/claude/reference/complete_post
+  // for settings and defaults. The settings below are those supported by the
+  // ClaudeBedrockModelParser refine_chat_completion_params implementation.
+  input: {
+    type: "string",
+  },
+  model_settings: {
+    type: "object",
+    properties: {
+      model: {
+        type: "string",
+      },
+      max_tokens_to_sample: {
+        type: "number",
+        description: `The maximum number of tokens to generate before stopping.
+        Note that the model may stop before reaching this maximum; this parameter only specifies the absolute maximum number of tokens to generate.`,
+      },
+      stop_sequences: {
+        type: "array",
+        items: {
+          type: "string",
+        },
+        description: `Sequences that will cause the model to stop generating.`,
+      },
+      stream: {
+        type: "boolean",
+        default: true,
+        description: `If true, the response is sent token by token. If false, it is sent in bulk.`,
+      },
+      temperature: {
+        type: "number",
+        minimum: 0.0,
+        maximum: 1.0,
+        description: `Amount of randomness injected into the response.`,
+      },
+      top_p: {
+        type: "number",
+        minimum: 0.0,
+        maximum: 1.0,
+        description: `In nucleus sampling, we compute the cumulative distribution over all the options for each subsequent token in decreasing probability order and cut it off once it reaches a particular probability specified by top_p.
+        You should either alter temperature or top_p, but not both.`,
+      },
+      top_k: {
+        type: "number",
+        description: `Only sample from the top K options for each subsequent token.
+        Used to remove "long tail" low probability responses.`,
+      },
+      metadata: {
+        type: "object",
+        properties: {
+          user_id: {
+            type: "string",
+          },
+        },
+        description: `An object describing metadata about the request. (Claude specific)`,
+      },
+    },
+    required: ["model", "max_tokens_to_sample", "stop_sequences"],
+  },
+  prompt_metadata: {
+    type: "object",
+    properties: {
+      remember_chat_context: {
+        type: "boolean",
+      },
+    },
+  },
+};
diff --git a/python/src/aiconfig/editor/client/src/utils/promptUtils.ts b/python/src/aiconfig/editor/client/src/utils/promptUtils.ts
index ef4eb77a6..968aa874c 100644
--- a/python/src/aiconfig/editor/client/src/utils/promptUtils.ts
+++ b/python/src/aiconfig/editor/client/src/utils/promptUtils.ts
@@ -18,6 +18,7 @@ import { HuggingFaceTextGenerationRemoteInferencePromptSchema } from "../shared/
 import { HuggingFaceTextSummarizationRemoteInferencePromptSchema } from "../shared/prompt_schemas/HuggingFaceTextSummarizationRemoteInferencePromptSchema";
 import { HuggingFaceTextTranslationRemoteInferencePromptSchema } from "../shared/prompt_schemas/HuggingFaceTextTranslationRemoteInferencePromptSchema";
 import { HuggingFaceImage2TextRemoteInferencePromptSchema } from "../shared/prompt_schemas/HuggingFaceImage2TextRemoteInferencePromptSchema";
+import { ClaudeBedrockPromptSchema } from "../shared/prompt_schemas/ClaudeBedrockPromptSchema";
 
 /**
  * Get the name of the model for the specified prompt. The name will either be specified in the prompt's
@@ -81,6 +82,8 @@ export const PROMPT_SCHEMAS: Record<string, PromptSchema> = {
   "dall-e-2": DalleImageGenerationParserPromptSchema,
   "dall-e-3": DalleImageGenerationParserPromptSchema,
 
+  "ClaudeBedrockModelParser": ClaudeBedrockPromptSchema,
+
   HuggingFaceImage2TextRemoteInference: HuggingFaceImage2TextRemoteInferencePromptSchema,
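
For reviewers, a minimal sketch (not part of this patch) of how the new `PROMPT_SCHEMAS` entry gets resolved. The registry is keyed by model name or model parser ID, so prompts handled by `ClaudeBedrockModelParser` pick up this schema. The lookup shape is assumed from the promptUtils.ts hunk above, and `getSchemaForModel` is a hypothetical helper name:

```typescript
import { ClaudeBedrockPromptSchema } from "./ClaudeBedrockPromptSchema";
import type { PromptSchema } from "../../utils/promptUtils";

// Assumed shape of the registry, mirroring the PROMPT_SCHEMAS hunk above:
// entries are keyed by model name or model parser ID.
const PROMPT_SCHEMAS: Record<string, PromptSchema> = {
  ClaudeBedrockModelParser: ClaudeBedrockPromptSchema,
};

// Hypothetical helper: given the model name resolved for a prompt
// (e.g. via getPromptModelName), return the schema driving the settings UI.
function getSchemaForModel(modelName: string): PromptSchema | undefined {
  return PROMPT_SCHEMAS[modelName];
}

// A prompt whose model parser is ClaudeBedrockModelParser resolves to the new schema:
console.log(getSchemaForModel("ClaudeBedrockModelParser") === ClaudeBedrockPromptSchema); // true
```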
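
And a sketch of what the schema encodes for the editor: required keys plus numeric bounds (e.g. `temperature` and `top_p` constrained to [0, 1]). The validator below is illustrative only, not an API from this repo, and `"anthropic.claude-v2"` is an example Bedrock model ID, not taken from this PR:

```typescript
type PropertySpec = {
  type: string;
  minimum?: number;
  maximum?: number;
};

type ModelSettingsSchema = {
  properties: Record<string, PropertySpec>;
  required?: string[];
};

// Illustrative validator: checks required keys and numeric bounds the way
// an editor consuming this schema might.
function validateSettings(
  schema: ModelSettingsSchema,
  settings: Record<string, unknown>
): string[] {
  const errors: string[] = [];
  for (const key of schema.required ?? []) {
    if (!(key in settings)) errors.push(`missing required setting: ${key}`);
  }
  for (const [key, spec] of Object.entries(schema.properties)) {
    const value = settings[key];
    if (typeof value !== "number") continue;
    if (spec.minimum !== undefined && value < spec.minimum)
      errors.push(`${key} is below the minimum of ${spec.minimum}`);
    if (spec.maximum !== undefined && value > spec.maximum)
      errors.push(`${key} is above the maximum of ${spec.maximum}`);
  }
  return errors;
}

// Settings that satisfy the schema's required list and bounds:
const errors = validateSettings(
  {
    properties: {
      temperature: { type: "number", minimum: 0.0, maximum: 1.0 },
      top_p: { type: "number", minimum: 0.0, maximum: 1.0 },
    },
    required: ["model", "max_tokens_to_sample", "stop_sequences"],
  },
  {
    model: "anthropic.claude-v2",
    max_tokens_to_sample: 256,
    stop_sequences: ["\n\nHuman:"],
    temperature: 0.7,
  }
);
console.log(errors); // []
```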