[editor] Claude Bedrock Prompt Schema
## Testplan

https://github.com/lastmile-ai/aiconfig/assets/141073967/29c1faa7-7d13-412f-8606-9ad556eb1c52
1 parent 94bb0d8 · commit 21d2574
Showing 2 changed files with 74 additions and 0 deletions.
python/src/aiconfig/editor/client/src/shared/prompt_schemas/ClaudeBedrockPromptSchema.ts (71 additions, 0 deletions)
@@ -0,0 +1,71 @@
import { PromptSchema } from "../../utils/promptUtils";

export const ClaudeBedrockPromptSchema: PromptSchema = {
  // See https://docs.anthropic.com/claude/reference/complete_post for settings
  // and defaults. The settings below are the supported settings specified in the
  // ClaudeBedrockModelParser refine_chat_completion_params implementation.
  input: {
    type: "string",
  },
  model_settings: {
    type: "object",
    properties: {
      model: {
        type: "string",
      },
      max_tokens_to_sample: {
        type: "number",
        description: `The maximum number of tokens to generate before stopping.
        Note that the model may stop before reaching this maximum.`,
      },
      stop_sequences: {
        type: "array",
        items: {
          type: "string",
        },
        description: `Sequences that will cause the model to stop generating.`,
      },
      stream: {
        type: "boolean",
        default: true,
        description: `If true, send the response token by token. If false, send the full response at once.`,
      },
      temperature: {
        type: "number",
        minimum: 0.0,
        maximum: 1.0,
        description: `Amount of randomness injected into the response.`,
      },
      top_p: {
        type: "number",
        minimum: 0.0,
        maximum: 1.0,
        description: `In nucleus sampling, we compute the cumulative distribution over all the options for each subsequent token in decreasing probability order and cut it off once it reaches a particular probability specified by top_p.
        You should either alter temperature or top_p, but not both.`,
      },
      top_k: {
        type: "number",
        description: `Only sample from the top K options for each subsequent token.
        Used to remove "long tail" low probability responses.`,
      },
      metadata: {
        type: "object",
        properties: {
          user_id: {
            type: "string",
          },
        },
        description: `An object describing metadata about the request. (Claude specific)`,
      },
    },
    required: ["model", "max_tokens_to_sample", "stop_sequences"],
  },
  prompt_metadata: {
    type: "object",
    properties: {
      remember_chat_context: {
        type: "boolean",
      },
    },
  },
};
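For illustration (not part of this commit): a settings object that satisfies the schema's required fields (model, max_tokens_to_sample, stop_sequences). This is a minimal sketch; the Bedrock model id and the specific values below are assumptions, not taken from the diff.

// Illustrative only: a model_settings value that conforms to ClaudeBedrockPromptSchema.
// "anthropic.claude-v2" is an assumed Bedrock model id; "\n\nHuman:" is the
// conventional Claude stop sequence from Anthropic's completion API docs.
const exampleSettings = {
  model: "anthropic.claude-v2",
  max_tokens_to_sample: 256,
  stop_sequences: ["\n\nHuman:"],
  temperature: 0.7, // within the schema's [0.0, 1.0] bounds
  stream: true,
};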