Commit

[editor] Claude Bedrock Prompt Schema
Ankush Pala [email protected] committed Jan 29, 2024
1 parent 94bb0d8 commit 21d2574
Showing 2 changed files with 74 additions and 0 deletions.
71 changes: 71 additions & 0 deletions python/src/aiconfig/editor/client/src/shared/prompt_schemas/ClaudeBedrockPromptSchema.ts
@@ -0,0 +1,71 @@
import { PromptSchema } from "../../utils/promptUtils";

export const ClaudeBedrockPromptSchema: PromptSchema = {
  // See https://docs.anthropic.com/claude/reference/complete_post for settings and defaults.
  // The settings below are the ones supported by the ClaudeBedrockModelParser's
  // refine_chat_completion_params implementation.
input: {
type: "string",
},
model_settings: {
type: "object",
properties: {
model: {
type: "string",
},
max_tokens_to_sample: {
type: "number",
        description: `The maximum number of tokens to generate before stopping. Note that the model may stop before reaching this maximum.`,
},
stop_sequences: {
type: "array",
items: {
type: "string",
},
description: `Sequences that will cause the model to stop generating.`,
},
stream: {
type: "boolean",
default: true,
        description: `If true, messages are sent token by token. If false, messages are sent in bulk.`,
},
temperature: {
type: "number",
minimum: 0.0,
maximum: 1.0,
description: `Amount of randomness injected into the response.`,
},
top_p: {
type: "number",
minimum: 0.0,
maximum: 1.0,
description: `In nucleus sampling, we compute the cumulative distribution over all the options for each subsequent token in decreasing probability order and cut it off once it reaches a particular probability specified by top_p.
You should either alter temperature or top_p, but not both.`,
},
top_k: {
type: "number",
description: `Only sample from the top K options for each subsequent token.
Used to remove "long tail" low probability responses.`,
},
metadata: {
type: "object",
properties: {
user_id: {
type: "string",
},
},
description: `An object describing metadata about the request. (Claude specific)`,
}
},
required: ["model", "max_tokens_to_sample", "stop_sequences"],
},
prompt_metadata: {
type: "object",
properties: {
remember_chat_context: {
type: "boolean",
},
},
},
};
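
For orientation, here is a minimal sketch of a model_settings object that would satisfy this schema. The Bedrock model ID and all values below are illustrative placeholders, not defaults taken from the parser or from this commit:

// Hypothetical example only; the model ID and values are assumptions for illustration.
const exampleModelSettings = {
  model: "anthropic.claude-v2", // assumed Bedrock model ID
  max_tokens_to_sample: 256,
  stop_sequences: ["\n\nHuman:"],
  stream: true,
  temperature: 0.7, // per the schema's guidance, alter temperature or top_p, not both
  top_k: 250,
  metadata: { user_id: "example-user" },
};
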
3 changes: 3 additions & 0 deletions python/src/aiconfig/editor/client/src/utils/promptUtils.ts
@@ -18,6 +18,7 @@ import { HuggingFaceTextGenerationRemoteInferencePromptSchema } from "../shared/
import { HuggingFaceTextSummarizationRemoteInferencePromptSchema } from "../shared/prompt_schemas/HuggingFaceTextSummarizationRemoteInferencePromptSchema";
import { HuggingFaceTextTranslationRemoteInferencePromptSchema } from "../shared/prompt_schemas/HuggingFaceTextTranslationRemoteInferencePromptSchema";
import { HuggingFaceImage2TextRemoteInferencePromptSchema } from "../shared/prompt_schemas/HuggingFaceImage2TextRemoteInferencePromptSchema";
import { ClaudeBedrockPromptSchema } from "../shared/prompt_schemas/ClaudeBedrockPromptSchema";

/**
* Get the name of the model for the specified prompt. The name will either be specified in the prompt's
@@ -81,6 +82,8 @@ export const PROMPT_SCHEMAS: Record<string, PromptSchema> = {
"dall-e-2": DalleImageGenerationParserPromptSchema,
"dall-e-3": DalleImageGenerationParserPromptSchema,

"ClaudeBedrockModelParser": ClaudeBedrockPromptSchema,

HuggingFaceImage2TextRemoteInference:
HuggingFaceImage2TextRemoteInferencePromptSchema,

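
As a rough sketch of how this registry entry gets used: PROMPT_SCHEMAS is keyed by the model parser name, so the editor can resolve the Claude Bedrock schema with a plain lookup. The relative import path below is an assumption for illustration:

// Sketch only; the import path is assumed.
import { PROMPT_SCHEMAS } from "./promptUtils";

const claudeSchema = PROMPT_SCHEMAS["ClaudeBedrockModelParser"];
console.log(claudeSchema.input); // { type: "string" }, as defined in the new schema above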
