From 75eded51db8c8bcec41cd894f3575374e40a4103 Mon Sep 17 00:00:00 2001 From: ShivangiReja <45216704+ShivangiReja@users.noreply.github.com> Date: Wed, 25 Sep 2024 21:53:30 -0700 Subject: [PATCH] Remove the virtual keyword from the Pipeline property across all clients (#227) Co-authored-by: ShivangiReja --- CHANGELOG.md | 12 +++-- api/OpenAI.netstandard2.0.cs | 48 +++++++++---------- src/Custom/Assistants/AssistantClient.cs | 10 +++- src/Custom/Assistants/GeneratorStubs.cs | 8 ---- .../InternalAssistantMessageClient.cs | 6 +++ .../Internal/InternalAssistantRunClient.cs | 6 +++ .../Internal/InternalAssistantThreadClient.cs | 6 +++ src/Custom/Assistants/RunCreationOptions.cs | 6 ++- src/Custom/Assistants/RunIncompleteReason.cs | 20 ++++++++ src/Custom/Assistants/RunStepTokenUsage.cs | 23 +++++++++ src/Custom/Assistants/ThreadRun.cs | 13 +++-- src/Custom/Audio/AudioClient.cs | 6 +++ src/Custom/Batch/BatchClient.cs | 6 +++ src/Custom/Chat/ChatClient.cs | 6 +++ src/Custom/Chat/OpenAIChatModelFactory.cs | 12 ++--- src/Custom/Embeddings/EmbeddingClient.cs | 6 +++ .../OpenAIEmbeddingsModelFactory.cs | 6 +-- src/Custom/Files/FileClient.cs | 6 +++ .../Files/Internal/InternalUploadsClient.cs | 6 +++ src/Custom/FineTuning/FineTuningClient.cs | 6 +++ src/Custom/Images/ImageClient.cs | 6 +++ .../Internal/LegacyCompletionClient.cs | 6 +++ src/Custom/Models/ModelClient.cs | 6 +++ src/Custom/Moderations/ModerationClient.cs | 6 +++ src/Custom/OpenAIClient.cs | 6 +++ src/Custom/VectorStores/VectorStoreClient.cs | 6 +++ src/Generated/AssistantClient.cs | 2 - src/Generated/AudioClient.cs | 2 - src/Generated/BatchClient.cs | 2 - src/Generated/ChatClient.cs | 2 - src/Generated/EmbeddingClient.cs | 2 - src/Generated/FileClient.cs | 2 - src/Generated/FineTuningClient.cs | 2 - src/Generated/ImageClient.cs | 2 - .../InternalAssistantMessageClient.cs | 2 - src/Generated/InternalAssistantRunClient.cs | 2 - .../InternalAssistantThreadClient.cs | 2 - src/Generated/InternalUploadsClient.cs | 2 - src/Generated/LegacyCompletionClient.cs | 2 - src/Generated/ModelClient.cs | 2 - .../RunCreationOptions.Serialization.cs | 12 ++--- src/Generated/Models/RunCreationOptions.cs | 6 +-- src/Generated/Models/RunIncompleteReason.cs | 7 +-- .../Models/RunStepTokenUsage.Serialization.cs | 6 +-- src/Generated/Models/RunStepTokenUsage.cs | 20 ++++---- .../Models/ThreadRun.Serialization.cs | 8 ++-- src/Generated/Models/ThreadRun.cs | 8 ++-- src/Generated/ModerationClient.cs | 2 - src/Generated/OpenAIClient.cs | 2 - src/Generated/OpenAIModelFactory.cs | 4 +- src/Generated/VectorStoreClient.cs | 2 - tests/Chat/OpenAIChatModelFactoryTests.cs | 10 ++-- .../OpenAIEmbeddingsModelFactoryTests.cs | 6 +-- 53 files changed, 240 insertions(+), 135 deletions(-) create mode 100644 src/Custom/Assistants/RunIncompleteReason.cs create mode 100644 src/Custom/Assistants/RunStepTokenUsage.cs diff --git a/CHANGELOG.md b/CHANGELOG.md index 38dc8d05..c60e3cac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,14 +6,16 @@ ### Breaking Changes -- Refactored `ModerationResult` by merging `ModerationCategories` and `ModerationCategoryScores` into individual `ModerationCategory` properties, each with `Flagged` and `Score` properties. (commit_hash) +- Refactored `ModerationResult` by merging `ModerationCategories` and `ModerationCategoryScores` into individual `ModerationCategory` properties, each with `Flagged` and `Score` properties. (commit_id) - Renamed type `OpenAIFileInfo` to `OpenAIFile` and `OpenAIFileInfoCollection` to `OpenAIFileCollection`. 
(commit_id) - Renamed type `OpenAIModelInfo` to `OpenAIModel` and `OpenAIModelInfoCollection` to `OpenAIModelCollection`. (commit_id) - Renamed type `Embedding` to `OpenAIEmbedding` and `EmbeddingCollection` to `OpenAIEmbeddingCollection`. (commit_id) -- Renamed property `ImageUrl` to `ImageUri` and method `FromImageUrl` to `FromImageUri` in `MessageContent`. (commit_id) -- Renamed property `ParallelToolCallsEnabled` to `AllowParallelToolCalls` in `RunCreationOptions`, `ThreadRun`, and `ChatCompletionOptions` types. (commit_id) -- Renamed property `PromptTokens` to `InputTokenCount`, `CompletionTokens` to `OutputTokenCount`, and `TotalTokens` to `TotalTokenCount` in `RunTokenUsage`. (commit_id) -- Renamed property `InputTokens` to `InputTokenCount` and `TotalTokens` to `TotalTokenCount` in `EmbeddingTokenUsage`. (commit_id) +- Renamed property `ImageUrl` to `ImageUri` and method `FromImageUrl` to `FromImageUri` in the `MessageContent` type. (commit_id) +- Renamed property `ParallelToolCallsEnabled` to `AllowParallelToolCalls` in the `RunCreationOptions`, `ThreadRun`, and `ChatCompletionOptions` types. (commit_id) +- Renamed properties `PromptTokens` to `InputTokenCount`, `CompletionTokens` to `OutputTokenCount`, and `TotalTokens` to `TotalTokenCount` in the `RunTokenUsage` and `RunStepTokenUsage` types. (commit_id) +- Renamed properties `InputTokens` to `InputTokenCount` and `TotalTokens` to `TotalTokenCount` in the `EmbeddingTokenUsage` type. (commit_id) +- Renamed properties `MaxPromptTokens` to `MaxInputTokenCount` and `MaxCompletionTokens` to `MaxOutputTokenCount` in the `ThreadRun`, `RunCreationOptions`, and `RunIncompleteReason` types. (commit_id) +- Removed the `virtual` keyword from the `Pipeline` property across all clients. (commit_id) ### Bugs Fixed diff --git a/api/OpenAI.netstandard2.0.cs b/api/OpenAI.netstandard2.0.cs index 27ad7145..421ec983 100644 --- a/api/OpenAI.netstandard2.0.cs +++ b/api/OpenAI.netstandard2.0.cs @@ -6,7 +6,7 @@ public class OpenAIClient { protected internal OpenAIClient(ClientPipeline pipeline, OpenAIClientOptions options); public OpenAIClient(string apiKey, OpenAIClientOptions options); public OpenAIClient(string apiKey); - public virtual ClientPipeline Pipeline { get; } + public ClientPipeline Pipeline { get; } public virtual AssistantClient GetAssistantClient(); public virtual AudioClient GetAudioClient(string model); public virtual BatchClient GetBatchClient(); @@ -53,7 +53,7 @@ public class AssistantClient { protected internal AssistantClient(ClientPipeline pipeline, OpenAIClientOptions options); public AssistantClient(string apiKey, OpenAIClientOptions options); public AssistantClient(string apiKey); - public virtual ClientPipeline Pipeline { get; } + public ClientPipeline Pipeline { get; } public virtual ClientResult CancelRun(ThreadRun run); [EditorBrowsable(EditorBrowsableState.Never)] public virtual ClientResult CancelRun(string threadId, string runId, RequestOptions options); @@ -600,8 +600,8 @@ public class RunCreationOptions : IJsonModel, IPersistableMo public IList AdditionalMessages { get; } public bool? AllowParallelToolCalls { get; set; } public string InstructionsOverride { get; set; } - public int? MaxCompletionTokens { get; set; } - public int? MaxPromptTokens { get; set; } + public int? MaxInputTokenCount { get; set; } + public int? MaxOutputTokenCount { get; set; } public IDictionary Metadata { get; } public string ModelOverride { get; set; } public float? 
NucleusSamplingFactor { get; set; } @@ -654,8 +654,8 @@ public class RunIncompleteDetails : IJsonModel, IPersistab private readonly object _dummy; private readonly int _dummyPrimitive; public RunIncompleteReason(string value); - public static RunIncompleteReason MaxCompletionTokens { get; } - public static RunIncompleteReason MaxPromptTokens { get; } + public static RunIncompleteReason MaxInputTokenCount { get; } + public static RunIncompleteReason MaxOutputTokenCount { get; } public readonly bool Equals(RunIncompleteReason other); [EditorBrowsable(EditorBrowsableState.Never)] public override readonly bool Equals(object obj); @@ -826,9 +826,9 @@ public class RunStepFileSearchResult : IJsonModel, IPer public override readonly string ToString(); } public class RunStepTokenUsage : IJsonModel, IPersistableModel { - public int CompletionTokens { get; } - public int PromptTokens { get; } - public int TotalTokens { get; } + public int InputTokenCount { get; } + public int OutputTokenCount { get; } + public int TotalTokenCount { get; } RunStepTokenUsage IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options); void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options); RunStepTokenUsage IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options); @@ -1022,8 +1022,8 @@ public class ThreadRun : IJsonModel, IPersistableModel { public RunIncompleteDetails IncompleteDetails { get; } public string Instructions { get; } public RunError LastError { get; } - public int? MaxCompletionTokens { get; } - public int? MaxPromptTokens { get; } + public int? MaxInputTokenCount { get; } + public int? MaxOutputTokenCount { get; } public IReadOnlyDictionary Metadata { get; } public string Model { get; } public float? NucleusSamplingFactor { get; } @@ -1114,7 +1114,7 @@ public class AudioClient { public AudioClient(string model, ApiKeyCredential credential); public AudioClient(string model, string apiKey, OpenAIClientOptions options); public AudioClient(string model, string apiKey); - public virtual ClientPipeline Pipeline { get; } + public ClientPipeline Pipeline { get; } [EditorBrowsable(EditorBrowsableState.Never)] public virtual ClientResult GenerateSpeech(BinaryContent content, RequestOptions options = null); public virtual ClientResult GenerateSpeech(string text, GeneratedSpeechVoice voice, SpeechGenerationOptions options = null, CancellationToken cancellationToken = default); @@ -1306,7 +1306,7 @@ public class BatchClient { protected internal BatchClient(ClientPipeline pipeline, OpenAIClientOptions options); public BatchClient(string apiKey, OpenAIClientOptions options); public BatchClient(string apiKey); - public virtual ClientPipeline Pipeline { get; } + public ClientPipeline Pipeline { get; } public virtual ClientResult CancelBatch(string batchId, RequestOptions options); public virtual Task CancelBatchAsync(string batchId, RequestOptions options); public virtual ClientResult CreateBatch(BinaryContent content, RequestOptions options = null); @@ -1343,7 +1343,7 @@ public class ChatClient { public ChatClient(string model, ApiKeyCredential credential); public ChatClient(string model, string apiKey, OpenAIClientOptions options); public ChatClient(string model, string apiKey); - public virtual ClientPipeline Pipeline { get; } + public ClientPipeline Pipeline { get; } public virtual ClientResult CompleteChat(params ChatMessage[] messages); [EditorBrowsable(EditorBrowsableState.Never)] public virtual ClientResult CompleteChat(BinaryContent content, 
RequestOptions options = null); @@ -1660,10 +1660,10 @@ public class FunctionChatMessage : ChatMessage, IJsonModel, } public static class OpenAIChatModelFactory { public static ChatCompletion ChatCompletion(string id = null, ChatFinishReason finishReason = ChatFinishReason.Stop, IEnumerable content = null, string refusal = null, IEnumerable toolCalls = null, ChatMessageRole role = ChatMessageRole.System, ChatFunctionCall functionCall = null, IEnumerable contentTokenLogProbabilities = null, IEnumerable refusalTokenLogProbabilities = null, DateTimeOffset createdAt = default, string model = null, string systemFingerprint = null, ChatTokenUsage usage = null); - public static ChatOutputTokenUsageDetails ChatOutputTokenUsageDetails(int reasoningTokens = 0); + public static ChatOutputTokenUsageDetails ChatOutputTokenUsageDetails(int reasoningTokenCount = 0); public static ChatTokenLogProbabilityDetails ChatTokenLogProbabilityDetails(string token = null, float logProbability = 0, ReadOnlyMemory? utf8Bytes = null, IEnumerable topLogProbabilities = null); public static ChatTokenTopLogProbabilityDetails ChatTokenTopLogProbabilityDetails(string token = null, float logProbability = 0, ReadOnlyMemory? utf8Bytes = null); - public static ChatTokenUsage ChatTokenUsage(int outputTokens = 0, int inputTokens = 0, int totalTokens = 0, ChatOutputTokenUsageDetails outputTokenDetails = null); + public static ChatTokenUsage ChatTokenUsage(int outputTokenCount = 0, int inputTokenCount = 0, int totalTokenCount = 0, ChatOutputTokenUsageDetails outputTokenDetails = null); public static StreamingChatCompletionUpdate StreamingChatCompletionUpdate(string id = null, IEnumerable contentUpdate = null, StreamingChatFunctionCallUpdate functionCallUpdate = null, IEnumerable toolCallUpdates = null, ChatMessageRole? role = null, string refusalUpdate = null, IEnumerable contentTokenLogProbabilities = null, IEnumerable refusalTokenLogProbabilities = null, ChatFinishReason? finishReason = null, DateTimeOffset createdAt = default, string model = null, string systemFingerprint = null, ChatTokenUsage usage = null); [Obsolete("This class is obsolete. 
Please use StreamingChatToolCallUpdate instead.")] public static StreamingChatFunctionCallUpdate StreamingChatFunctionCallUpdate(string functionArgumentsUpdate = null, string functionName = null); @@ -1754,7 +1754,7 @@ public class EmbeddingClient { public EmbeddingClient(string model, ApiKeyCredential credential); public EmbeddingClient(string model, string apiKey, OpenAIClientOptions options); public EmbeddingClient(string model, string apiKey); - public virtual ClientPipeline Pipeline { get; } + public ClientPipeline Pipeline { get; } public virtual ClientResult GenerateEmbedding(string input, EmbeddingGenerationOptions options = null, CancellationToken cancellationToken = default); public virtual Task> GenerateEmbeddingAsync(string input, EmbeddingGenerationOptions options = null, CancellationToken cancellationToken = default); [EditorBrowsable(EditorBrowsableState.Never)] @@ -1803,7 +1803,7 @@ public class OpenAIEmbeddingCollection : ObjectModel.ReadOnlyCollection.Write(ModelReaderWriterOptions options); } public static class OpenAIEmbeddingsModelFactory { - public static EmbeddingTokenUsage EmbeddingTokenUsage(int inputTokens = 0, int totalTokens = 0); + public static EmbeddingTokenUsage EmbeddingTokenUsage(int inputTokenCount = 0, int totalTokenCount = 0); public static OpenAIEmbedding OpenAIEmbedding(int index = 0, IEnumerable vector = null); public static OpenAIEmbeddingCollection OpenAIEmbeddingCollection(IEnumerable items = null, string model = null, EmbeddingTokenUsage usage = null); } @@ -1816,7 +1816,7 @@ public class FileClient { protected internal FileClient(ClientPipeline pipeline, OpenAIClientOptions options); public FileClient(string apiKey, OpenAIClientOptions options); public FileClient(string apiKey); - public virtual ClientPipeline Pipeline { get; } + public ClientPipeline Pipeline { get; } public virtual ClientResult AddUploadPart(string uploadId, BinaryContent content, string contentType, RequestOptions options = null); public virtual Task AddUploadPartAsync(string uploadId, BinaryContent content, string contentType, RequestOptions options = null); public virtual ClientResult CancelUpload(string uploadId, RequestOptions options = null); @@ -1965,7 +1965,7 @@ public class FineTuningClient { protected internal FineTuningClient(ClientPipeline pipeline, OpenAIClientOptions options); public FineTuningClient(string apiKey, OpenAIClientOptions options); public FineTuningClient(string apiKey); - public virtual ClientPipeline Pipeline { get; } + public ClientPipeline Pipeline { get; } public virtual ClientResult CancelJob(string jobId, RequestOptions options); public virtual Task CancelJobAsync(string jobId, RequestOptions options); public virtual ClientResult CreateJob(BinaryContent content, RequestOptions options = null); @@ -2036,7 +2036,7 @@ public class ImageClient { public ImageClient(string model, ApiKeyCredential credential); public ImageClient(string model, string apiKey, OpenAIClientOptions options); public ImageClient(string model, string apiKey); - public virtual ClientPipeline Pipeline { get; } + public ClientPipeline Pipeline { get; } public virtual ClientResult GenerateImage(string prompt, ImageGenerationOptions options = null, CancellationToken cancellationToken = default); public virtual Task> GenerateImageAsync(string prompt, ImageGenerationOptions options = null, CancellationToken cancellationToken = default); public virtual ClientResult GenerateImageEdit(Stream image, string imageFilename, string prompt, ImageEditOptions options = null, CancellationToken 
cancellationToken = default); @@ -2123,7 +2123,7 @@ public class ModelClient { protected internal ModelClient(ClientPipeline pipeline, OpenAIClientOptions options); public ModelClient(string apiKey, OpenAIClientOptions options); public ModelClient(string apiKey); - public virtual ClientPipeline Pipeline { get; } + public ClientPipeline Pipeline { get; } [EditorBrowsable(EditorBrowsableState.Never)] public virtual ClientResult DeleteModel(string model, RequestOptions options); public virtual ClientResult DeleteModel(string model, CancellationToken cancellationToken = default); @@ -2187,7 +2187,7 @@ public class ModerationClient { public ModerationClient(string model, ApiKeyCredential credential); public ModerationClient(string model, string apiKey, OpenAIClientOptions options); public ModerationClient(string model, string apiKey); - public virtual ClientPipeline Pipeline { get; } + public ClientPipeline Pipeline { get; } [EditorBrowsable(EditorBrowsableState.Never)] public virtual ClientResult ClassifyText(BinaryContent content, RequestOptions options = null); public virtual ClientResult ClassifyText(IEnumerable inputs, CancellationToken cancellationToken = default); @@ -2315,7 +2315,7 @@ public class VectorStoreClient { protected internal VectorStoreClient(ClientPipeline pipeline, OpenAIClientOptions options); public VectorStoreClient(string apiKey, OpenAIClientOptions options); public VectorStoreClient(string apiKey); - public virtual ClientPipeline Pipeline { get; } + public ClientPipeline Pipeline { get; } public virtual ClientResult AddFileToVectorStore(VectorStore vectorStore, OpenAIFile file); [EditorBrowsable(EditorBrowsableState.Never)] public virtual ClientResult AddFileToVectorStore(string vectorStoreId, BinaryContent content, RequestOptions options = null); diff --git a/src/Custom/Assistants/AssistantClient.cs b/src/Custom/Assistants/AssistantClient.cs index 11854de6..22bdb229 100644 --- a/src/Custom/Assistants/AssistantClient.cs +++ b/src/Custom/Assistants/AssistantClient.cs @@ -30,6 +30,12 @@ public partial class AssistantClient private readonly InternalAssistantRunClient _runSubClient; private readonly InternalAssistantThreadClient _threadSubClient; + // CUSTOM: Remove virtual keyword. + /// + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// + public ClientPipeline Pipeline => _pipeline; + // CUSTOM: Added as a convenience. /// Initializes a new instance of . /// The API key to authenticate with the service. 
@@ -1263,8 +1269,8 @@ private static BinaryContent CreateThreadAndRunProtocolContent( runOptions.Temperature, runOptions.NucleusSamplingFactor, runOptions.Stream, - runOptions.MaxPromptTokens, - runOptions.MaxCompletionTokens, + runOptions.MaxInputTokenCount, + runOptions.MaxOutputTokenCount, runOptions.TruncationStrategy, runOptions.ToolConstraint, runOptions.AllowParallelToolCalls, diff --git a/src/Custom/Assistants/GeneratorStubs.cs b/src/Custom/Assistants/GeneratorStubs.cs index a384ae6d..ef271225 100644 --- a/src/Custom/Assistants/GeneratorStubs.cs +++ b/src/Custom/Assistants/GeneratorStubs.cs @@ -34,10 +34,6 @@ public readonly partial struct RunErrorCode { } [CodeGenModel("RunObjectIncompleteDetails")] public partial class RunIncompleteDetails { } -[Experimental("OPENAI001")] -[CodeGenModel("RunObjectIncompleteDetailsReason")] -public readonly partial struct RunIncompleteReason { } - [Experimental("OPENAI001")] [CodeGenModel("RunStepObjectType")] public readonly partial struct RunStepType { } @@ -54,10 +50,6 @@ public partial class RunStepError { } [CodeGenModel("RunStepObjectLastErrorCode")] public readonly partial struct RunStepErrorCode { } -[Experimental("OPENAI001")] -[CodeGenModel("RunStepCompletionUsage")] -public partial class RunStepTokenUsage { } - [Experimental("OPENAI001")] [CodeGenModel("RunStepDetailsToolCallsCodeObjectCodeInterpreterOutputsObject")] public partial class RunStepCodeInterpreterOutput { } diff --git a/src/Custom/Assistants/Internal/InternalAssistantMessageClient.cs b/src/Custom/Assistants/Internal/InternalAssistantMessageClient.cs index 800de448..e8f76476 100644 --- a/src/Custom/Assistants/Internal/InternalAssistantMessageClient.cs +++ b/src/Custom/Assistants/Internal/InternalAssistantMessageClient.cs @@ -18,6 +18,12 @@ namespace OpenAI.Assistants; [CodeGenSuppress("DeleteMessage", typeof(string), typeof(string))] internal partial class InternalAssistantMessageClient { + // CUSTOM: Remove virtual keyword. + /// + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// + public ClientPipeline Pipeline => _pipeline; + // CUSTOM: // - Used a custom pipeline. // - Demoted the endpoint parameter to be a property in the options class. diff --git a/src/Custom/Assistants/Internal/InternalAssistantRunClient.cs b/src/Custom/Assistants/Internal/InternalAssistantRunClient.cs index a1237b54..9c3ca21e 100644 --- a/src/Custom/Assistants/Internal/InternalAssistantRunClient.cs +++ b/src/Custom/Assistants/Internal/InternalAssistantRunClient.cs @@ -26,6 +26,12 @@ namespace OpenAI.Assistants; [CodeGenSuppress("GetRunStep", typeof(string), typeof(string), typeof(string))] internal partial class InternalAssistantRunClient { + // CUSTOM: Remove virtual keyword. + /// + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// + public ClientPipeline Pipeline => _pipeline; + // CUSTOM: // - Used a custom pipeline. // - Demoted the endpoint parameter to be a property in the options class. diff --git a/src/Custom/Assistants/Internal/InternalAssistantThreadClient.cs b/src/Custom/Assistants/Internal/InternalAssistantThreadClient.cs index 295a8a49..f0cb90c9 100644 --- a/src/Custom/Assistants/Internal/InternalAssistantThreadClient.cs +++ b/src/Custom/Assistants/Internal/InternalAssistantThreadClient.cs @@ -16,6 +16,12 @@ namespace OpenAI.Assistants; [CodeGenSuppress("DeleteThread", typeof(string))] internal partial class InternalAssistantThreadClient { + // CUSTOM: Remove virtual keyword. 
+ /// + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// + public ClientPipeline Pipeline => _pipeline; + // CUSTOM: // - Used a custom pipeline. // - Demoted the endpoint parameter to be a property in the options class. diff --git a/src/Custom/Assistants/RunCreationOptions.cs b/src/Custom/Assistants/RunCreationOptions.cs index dacf4602..57500661 100644 --- a/src/Custom/Assistants/RunCreationOptions.cs +++ b/src/Custom/Assistants/RunCreationOptions.cs @@ -115,10 +115,12 @@ private set public float? NucleusSamplingFactor { get; set; } /// The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. - public int? MaxPromptTokens { get; set; } + [CodeGenMember("MaxPromptTokens")] + public int? MaxInputTokenCount { get; set; } /// The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. - public int? MaxCompletionTokens { get; set; } + [CodeGenMember("MaxCompletionTokens")] + public int? MaxOutputTokenCount { get; set; } /// Gets or sets the truncation strategy. public RunTruncationStrategy TruncationStrategy { get; set; } diff --git a/src/Custom/Assistants/RunIncompleteReason.cs b/src/Custom/Assistants/RunIncompleteReason.cs new file mode 100644 index 00000000..5b17c95d --- /dev/null +++ b/src/Custom/Assistants/RunIncompleteReason.cs @@ -0,0 +1,20 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Text; + +namespace OpenAI.Assistants +{ + [Experimental("OPENAI001")] + [CodeGenModel("RunObjectIncompleteDetailsReason")] + public readonly partial struct RunIncompleteReason + { + // CUSTOM: Renamed. + [CodeGenMember("MaxCompletionTokens")] + public static RunIncompleteReason MaxOutputTokenCount { get; } = new RunIncompleteReason(MaxOutputTokenCountValue); + + // CUSTOM: Renamed. + [CodeGenMember("MaxPromptTokens")] + public static RunIncompleteReason MaxInputTokenCount { get; } = new RunIncompleteReason(MaxInputTokenCountValue); + } +} diff --git a/src/Custom/Assistants/RunStepTokenUsage.cs b/src/Custom/Assistants/RunStepTokenUsage.cs new file mode 100644 index 00000000..66f01c90 --- /dev/null +++ b/src/Custom/Assistants/RunStepTokenUsage.cs @@ -0,0 +1,23 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; + +namespace OpenAI.Assistants +{ + [Experimental("OPENAI001")] + [CodeGenModel("RunStepCompletionUsage")] + public partial class RunStepTokenUsage + { + // CUSTOM: Renamed. + [CodeGenMember("CompletionTokens")] + public int OutputTokenCount { get; } + + // CUSTOM: Renamed. + [CodeGenMember("PromptTokens")] + public int InputTokenCount { get; } + + // CUSTOM: Renamed. 
+ [CodeGenMember("TotalTokens")] + public int TotalTokenCount { get; } + } +} diff --git a/src/Custom/Assistants/ThreadRun.cs b/src/Custom/Assistants/ThreadRun.cs index f3cbd61e..0d8a04fe 100644 --- a/src/Custom/Assistants/ThreadRun.cs +++ b/src/Custom/Assistants/ThreadRun.cs @@ -23,7 +23,7 @@ public partial class ThreadRun internal readonly InternalRunRequiredAction _internalRequiredAction; // CUSTOM: Removed null check for `toolConstraint` and `responseFormat`. - internal ThreadRun(string id, DateTimeOffset createdAt, string threadId, string assistantId, RunStatus status, InternalRunRequiredAction internalRequiredAction, RunError lastError, DateTimeOffset? expiresAt, DateTimeOffset? startedAt, DateTimeOffset? cancelledAt, DateTimeOffset? failedAt, DateTimeOffset? completedAt, RunIncompleteDetails incompleteDetails, string model, string instructions, IEnumerable tools, IReadOnlyDictionary metadata, RunTokenUsage usage, int? maxPromptTokens, int? maxCompletionTokens, RunTruncationStrategy truncationStrategy, ToolConstraint toolConstraint, bool? allowParallelToolCalls, AssistantResponseFormat responseFormat) + internal ThreadRun(string id, DateTimeOffset createdAt, string threadId, string assistantId, RunStatus status, InternalRunRequiredAction internalRequiredAction, RunError lastError, DateTimeOffset? expiresAt, DateTimeOffset? startedAt, DateTimeOffset? cancelledAt, DateTimeOffset? failedAt, DateTimeOffset? completedAt, RunIncompleteDetails incompleteDetails, string model, string instructions, IEnumerable tools, IReadOnlyDictionary metadata, RunTokenUsage usage, int? maxInputTokenCount, int? maxOutputTokenCount, RunTruncationStrategy truncationStrategy, ToolConstraint toolConstraint, bool? allowParallelToolCalls, AssistantResponseFormat responseFormat) { Argument.AssertNotNull(id, nameof(id)); Argument.AssertNotNull(threadId, nameof(threadId)); @@ -50,8 +50,8 @@ internal ThreadRun(string id, DateTimeOffset createdAt, string threadId, string Tools = tools.ToList(); Metadata = metadata; Usage = usage; - MaxPromptTokens = maxPromptTokens; - MaxCompletionTokens = maxCompletionTokens; + MaxInputTokenCount = maxInputTokenCount; + MaxOutputTokenCount = maxOutputTokenCount; TruncationStrategy = truncationStrategy; ToolConstraint = toolConstraint; AllowParallelToolCalls = allowParallelToolCalls; @@ -95,4 +95,11 @@ internal ThreadRun(string id, DateTimeOffset createdAt, string threadId, string [CodeGenMember("ParallelToolCalls")] public bool? AllowParallelToolCalls { get; } + // CUSTOM: Renamed. + [CodeGenMember("MaxPromptTokens")] + public int? MaxInputTokenCount { get; } + + // CUSTOM: Renamed. + [CodeGenMember("MaxCompletionTokens")] + public int? MaxOutputTokenCount { get; } } diff --git a/src/Custom/Audio/AudioClient.cs b/src/Custom/Audio/AudioClient.cs index a0801cf2..9da550e8 100644 --- a/src/Custom/Audio/AudioClient.cs +++ b/src/Custom/Audio/AudioClient.cs @@ -24,6 +24,12 @@ public partial class AudioClient { private readonly string _model; + // CUSTOM: Remove virtual keyword. + /// + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// + public ClientPipeline Pipeline => _pipeline; + // CUSTOM: Added as a convenience. /// Initializes a new instance of . /// The name of the model to use in requests sent to the service. To learn more about the available models, see . 
diff --git a/src/Custom/Batch/BatchClient.cs b/src/Custom/Batch/BatchClient.cs index 72225c58..0f9a6b2a 100644 --- a/src/Custom/Batch/BatchClient.cs +++ b/src/Custom/Batch/BatchClient.cs @@ -24,6 +24,12 @@ namespace OpenAI.Batch; [CodeGenSuppress("GetBatchesAsync", typeof(string), typeof(int?))] public partial class BatchClient { + // CUSTOM: Remove virtual keyword. + /// + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// + public ClientPipeline Pipeline => _pipeline; + // CUSTOM: Added as a convenience. /// Initializes a new instance of . /// The API key to authenticate with the service. diff --git a/src/Custom/Chat/ChatClient.cs b/src/Custom/Chat/ChatClient.cs index 6d250eee..54a94f93 100644 --- a/src/Custom/Chat/ChatClient.cs +++ b/src/Custom/Chat/ChatClient.cs @@ -23,6 +23,12 @@ public partial class ChatClient private readonly string _model; private readonly OpenTelemetrySource _telemetry; + // CUSTOM: Remove virtual keyword. + /// + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// + public ClientPipeline Pipeline => _pipeline; + // CUSTOM: Added as a convenience. /// Initializes a new instance of . /// The name of the model to use in requests sent to the service. To learn more about the available models, see . diff --git a/src/Custom/Chat/OpenAIChatModelFactory.cs b/src/Custom/Chat/OpenAIChatModelFactory.cs index 48a51cc1..e8616ca7 100644 --- a/src/Custom/Chat/OpenAIChatModelFactory.cs +++ b/src/Custom/Chat/OpenAIChatModelFactory.cs @@ -90,21 +90,21 @@ public static ChatTokenTopLogProbabilityDetails ChatTokenTopLogProbabilityDetail /// Initializes a new instance of . /// A new instance for mocking. - public static ChatTokenUsage ChatTokenUsage(int outputTokens = default, int inputTokens = default, int totalTokens = default, ChatOutputTokenUsageDetails outputTokenDetails = default) + public static ChatTokenUsage ChatTokenUsage(int outputTokenCount = default, int inputTokenCount = default, int totalTokenCount = default, ChatOutputTokenUsageDetails outputTokenDetails = null) { return new ChatTokenUsage( - outputTokens, - inputTokens, - totalTokens, + outputTokenCount, + inputTokenCount, + totalTokenCount, outputTokenDetails, serializedAdditionalRawData: null); } /// Initializes a new instance of . /// A new instance for mocking. - public static ChatOutputTokenUsageDetails ChatOutputTokenUsageDetails(int reasoningTokens = default) + public static ChatOutputTokenUsageDetails ChatOutputTokenUsageDetails(int reasoningTokenCount = default) { - return new ChatOutputTokenUsageDetails(reasoningTokens, serializedAdditionalRawData: null); + return new ChatOutputTokenUsageDetails(reasoningTokenCount, serializedAdditionalRawData: null); } /// Initializes a new instance of . diff --git a/src/Custom/Embeddings/EmbeddingClient.cs b/src/Custom/Embeddings/EmbeddingClient.cs index ceacabec..337a7f96 100644 --- a/src/Custom/Embeddings/EmbeddingClient.cs +++ b/src/Custom/Embeddings/EmbeddingClient.cs @@ -21,6 +21,12 @@ public partial class EmbeddingClient { private readonly string _model; + // CUSTOM: Remove virtual keyword. + /// + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// + public ClientPipeline Pipeline => _pipeline; + // CUSTOM: Added as a convenience. /// Initializes a new instance of . /// The name of the model to use in requests sent to the service. To learn more about the available models, see . 
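// --- Hedged usage sketch (illustrative, not part of the patch) --------------------
// Why: the OpenAIChatModelFactory hunk above renames the mocking parameters to
// outputTokenCount/inputTokenCount/totalTokenCount. What it shows: building mock
// chat usage data with the new names; the literal token counts are illustrative.
using OpenAI.Chat;

ChatTokenUsage usage = OpenAIChatModelFactory.ChatTokenUsage(
    outputTokenCount: 20,
    inputTokenCount: 10,
    totalTokenCount: 30);

ChatCompletion completion = OpenAIChatModelFactory.ChatCompletion(usage: usage);
// -----------------------------------------------------------------------------------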
diff --git a/src/Custom/Embeddings/OpenAIEmbeddingsModelFactory.cs b/src/Custom/Embeddings/OpenAIEmbeddingsModelFactory.cs index 0ab64eac..c4fd9132 100644 --- a/src/Custom/Embeddings/OpenAIEmbeddingsModelFactory.cs +++ b/src/Custom/Embeddings/OpenAIEmbeddingsModelFactory.cs @@ -33,11 +33,11 @@ public static OpenAIEmbeddingCollection OpenAIEmbeddingCollection(IEnumerable Initializes a new instance of . /// A new instance for mocking. - public static EmbeddingTokenUsage EmbeddingTokenUsage(int inputTokens = default, int totalTokens = default) + public static EmbeddingTokenUsage EmbeddingTokenUsage(int inputTokenCount = default, int totalTokenCount = default) { return new EmbeddingTokenUsage( - inputTokens, - totalTokens, + inputTokenCount, + totalTokenCount, serializedAdditionalRawData: null); } } diff --git a/src/Custom/Files/FileClient.cs b/src/Custom/Files/FileClient.cs index 1968eaa3..6b876198 100644 --- a/src/Custom/Files/FileClient.cs +++ b/src/Custom/Files/FileClient.cs @@ -27,6 +27,12 @@ public partial class FileClient { private InternalUploadsClient _internalUploadsClient; + // CUSTOM: Remove virtual keyword. + /// + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// + public ClientPipeline Pipeline => _pipeline; + // CUSTOM: Added as a convenience. /// Initializes a new instance of . /// The name of the model to use in requests sent to the service. To learn more about the available models, see . diff --git a/src/Custom/Files/Internal/InternalUploadsClient.cs b/src/Custom/Files/Internal/InternalUploadsClient.cs index 0e9b3915..8150ce1b 100644 --- a/src/Custom/Files/Internal/InternalUploadsClient.cs +++ b/src/Custom/Files/Internal/InternalUploadsClient.cs @@ -8,6 +8,12 @@ namespace OpenAI.Files; [CodeGenSuppress("InternalUploadsClient", typeof(ClientPipeline), typeof(ApiKeyCredential), typeof(Uri))] internal partial class InternalUploadsClient { + // CUSTOM: Remove virtual keyword. + /// + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// + public ClientPipeline Pipeline => _pipeline; + // CUSTOM: // - Used a custom pipeline. // - Demoted the endpoint parameter to be a property in the options class. diff --git a/src/Custom/FineTuning/FineTuningClient.cs b/src/Custom/FineTuning/FineTuningClient.cs index d324d185..ee47730a 100644 --- a/src/Custom/FineTuning/FineTuningClient.cs +++ b/src/Custom/FineTuning/FineTuningClient.cs @@ -27,6 +27,12 @@ namespace OpenAI.FineTuning; [CodeGenSuppress("GetFineTuningJobCheckpoints", typeof(string), typeof(string), typeof(int?))] public partial class FineTuningClient { + // CUSTOM: Remove virtual keyword. + /// + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// + public ClientPipeline Pipeline => _pipeline; + // CUSTOM: Added as a convenience. /// Initializes a new instance of . /// The API key to authenticate with the service. diff --git a/src/Custom/Images/ImageClient.cs b/src/Custom/Images/ImageClient.cs index 29fb381b..9077805e 100644 --- a/src/Custom/Images/ImageClient.cs +++ b/src/Custom/Images/ImageClient.cs @@ -25,6 +25,12 @@ public partial class ImageClient { private readonly string _model; + // CUSTOM: Remove virtual keyword. + /// + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// + public ClientPipeline Pipeline => _pipeline; + // CUSTOM: Added as a convenience. /// Initializes a new instance of . /// The name of the model to use in requests sent to the service. To learn more about the available models, see . 
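// --- Hedged usage sketch (illustrative, not part of the patch) --------------------
// Why: the OpenAIEmbeddingsModelFactory hunk above renames inputTokens/totalTokens
// to inputTokenCount/totalTokenCount. What it shows: creating mock embedding usage
// with the new names; the literal token counts are illustrative.
using OpenAI.Embeddings;

EmbeddingTokenUsage usage = OpenAIEmbeddingsModelFactory.EmbeddingTokenUsage(
    inputTokenCount: 10,
    totalTokenCount: 10);

OpenAIEmbeddingCollection embeddings =
    OpenAIEmbeddingsModelFactory.OpenAIEmbeddingCollection(usage: usage);
// -----------------------------------------------------------------------------------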
diff --git a/src/Custom/LegacyCompletions/Internal/LegacyCompletionClient.cs b/src/Custom/LegacyCompletions/Internal/LegacyCompletionClient.cs index b75677d6..53103aaa 100644 --- a/src/Custom/LegacyCompletions/Internal/LegacyCompletionClient.cs +++ b/src/Custom/LegacyCompletions/Internal/LegacyCompletionClient.cs @@ -15,6 +15,12 @@ internal partial class LegacyCompletionClient { private readonly string _model; + // CUSTOM: Remove virtual keyword. + /// + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// + public ClientPipeline Pipeline => _pipeline; + // CUSTOM: Added as a convenience. /// Initializes a new instance of . /// The name of the model to use in requests sent to the service. To learn more about the available models, see . diff --git a/src/Custom/Models/ModelClient.cs b/src/Custom/Models/ModelClient.cs index 73c39625..44079d90 100644 --- a/src/Custom/Models/ModelClient.cs +++ b/src/Custom/Models/ModelClient.cs @@ -22,6 +22,12 @@ namespace OpenAI.Models; public partial class ModelClient { + // CUSTOM: Remove virtual keyword. + /// + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// + public ClientPipeline Pipeline => _pipeline; + // CUSTOM: Added as a convenience. /// Initializes a new instance of . /// The API key to authenticate with the service. diff --git a/src/Custom/Moderations/ModerationClient.cs b/src/Custom/Moderations/ModerationClient.cs index 2f590206..aeb7a2b1 100644 --- a/src/Custom/Moderations/ModerationClient.cs +++ b/src/Custom/Moderations/ModerationClient.cs @@ -21,6 +21,12 @@ public partial class ModerationClient { private readonly string _model; + // CUSTOM: Remove virtual keyword. + /// + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// + public ClientPipeline Pipeline => _pipeline; + // CUSTOM: Added as a convenience. /// Initializes a new instance of . /// The name of the model to use in requests sent to the service. To learn more about the available models, see . diff --git a/src/Custom/OpenAIClient.cs b/src/Custom/OpenAIClient.cs index a79bba34..f145cd54 100644 --- a/src/Custom/OpenAIClient.cs +++ b/src/Custom/OpenAIClient.cs @@ -74,6 +74,12 @@ private static class KnownHeaderNames private readonly OpenAIClientOptions _options; + // CUSTOM: Remove virtual keyword. + /// + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// + public ClientPipeline Pipeline => _pipeline; + // CUSTOM: Added as a convenience. /// Initializes a new instance of . /// The API key to authenticate with the service. diff --git a/src/Custom/VectorStores/VectorStoreClient.cs b/src/Custom/VectorStores/VectorStoreClient.cs index 4b01f3aa..a7d76d4c 100644 --- a/src/Custom/VectorStores/VectorStoreClient.cs +++ b/src/Custom/VectorStores/VectorStoreClient.cs @@ -43,6 +43,12 @@ namespace OpenAI.VectorStores; [Experimental("OPENAI001")] public partial class VectorStoreClient { + // CUSTOM: Remove virtual keyword. + /// + /// The HTTP pipeline for sending and receiving REST requests and responses. + /// + public ClientPipeline Pipeline => _pipeline; + // CUSTOM: Added as a convenience. /// Initializes a new instance of . /// The API key to authenticate with the service. 
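// --- Hedged usage sketch (illustrative, not part of the patch) --------------------
// Why: the custom hunks above expose Pipeline as a non-virtual property on every
// client. What it shows: the property is still publicly readable; only overriding
// it in a derived mock no longer compiles. The model name and environment variable
// are illustrative assumptions.
using System;
using System.ClientModel.Primitives;
using OpenAI.Audio;

AudioClient client = new("whisper-1", Environment.GetEnvironmentVariable("OPENAI_API_KEY"));
ClientPipeline pipeline = client.Pipeline; // reads as before, just no longer virtual
Console.WriteLine(pipeline is not null);
// -----------------------------------------------------------------------------------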
diff --git a/src/Generated/AssistantClient.cs b/src/Generated/AssistantClient.cs index 14c304e8..34fb05f5 100644 --- a/src/Generated/AssistantClient.cs +++ b/src/Generated/AssistantClient.cs @@ -18,8 +18,6 @@ public partial class AssistantClient private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - public virtual ClientPipeline Pipeline => _pipeline; - protected AssistantClient() { } diff --git a/src/Generated/AudioClient.cs b/src/Generated/AudioClient.cs index 8b5a4f1d..285c3ffd 100644 --- a/src/Generated/AudioClient.cs +++ b/src/Generated/AudioClient.cs @@ -18,8 +18,6 @@ public partial class AudioClient private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - public virtual ClientPipeline Pipeline => _pipeline; - protected AudioClient() { } diff --git a/src/Generated/BatchClient.cs b/src/Generated/BatchClient.cs index 3ccd2f90..b9cca2ee 100644 --- a/src/Generated/BatchClient.cs +++ b/src/Generated/BatchClient.cs @@ -19,8 +19,6 @@ public partial class BatchClient private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - public virtual ClientPipeline Pipeline => _pipeline; - protected BatchClient() { } diff --git a/src/Generated/ChatClient.cs b/src/Generated/ChatClient.cs index 6db94d5e..32af72c9 100644 --- a/src/Generated/ChatClient.cs +++ b/src/Generated/ChatClient.cs @@ -18,8 +18,6 @@ public partial class ChatClient private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - public virtual ClientPipeline Pipeline => _pipeline; - protected ChatClient() { } diff --git a/src/Generated/EmbeddingClient.cs b/src/Generated/EmbeddingClient.cs index e93cade4..6f7f1c81 100644 --- a/src/Generated/EmbeddingClient.cs +++ b/src/Generated/EmbeddingClient.cs @@ -18,8 +18,6 @@ public partial class EmbeddingClient private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - public virtual ClientPipeline Pipeline => _pipeline; - protected EmbeddingClient() { } diff --git a/src/Generated/FileClient.cs b/src/Generated/FileClient.cs index 131f6ba5..cc6ed6cf 100644 --- a/src/Generated/FileClient.cs +++ b/src/Generated/FileClient.cs @@ -18,8 +18,6 @@ public partial class FileClient private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - public virtual ClientPipeline Pipeline => _pipeline; - protected FileClient() { } diff --git a/src/Generated/FineTuningClient.cs b/src/Generated/FineTuningClient.cs index c4446699..79ffc1d8 100644 --- a/src/Generated/FineTuningClient.cs +++ b/src/Generated/FineTuningClient.cs @@ -18,8 +18,6 @@ public partial class FineTuningClient private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - public virtual ClientPipeline Pipeline => _pipeline; - protected FineTuningClient() { } diff --git a/src/Generated/ImageClient.cs b/src/Generated/ImageClient.cs index 6e0558f0..a92809a8 100644 --- a/src/Generated/ImageClient.cs +++ b/src/Generated/ImageClient.cs @@ -18,8 +18,6 @@ public partial class ImageClient private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - public virtual ClientPipeline Pipeline => _pipeline; - protected ImageClient() { } diff --git a/src/Generated/InternalAssistantMessageClient.cs b/src/Generated/InternalAssistantMessageClient.cs index 5ad0ce02..daee4b08 100644 --- a/src/Generated/InternalAssistantMessageClient.cs +++ b/src/Generated/InternalAssistantMessageClient.cs @@ -18,8 +18,6 @@ internal partial class InternalAssistantMessageClient private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - 
public virtual ClientPipeline Pipeline => _pipeline; - protected InternalAssistantMessageClient() { } diff --git a/src/Generated/InternalAssistantRunClient.cs b/src/Generated/InternalAssistantRunClient.cs index b079a8a5..34c15623 100644 --- a/src/Generated/InternalAssistantRunClient.cs +++ b/src/Generated/InternalAssistantRunClient.cs @@ -18,8 +18,6 @@ internal partial class InternalAssistantRunClient private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - public virtual ClientPipeline Pipeline => _pipeline; - protected InternalAssistantRunClient() { } diff --git a/src/Generated/InternalAssistantThreadClient.cs b/src/Generated/InternalAssistantThreadClient.cs index 347ef4bd..8c7c5d69 100644 --- a/src/Generated/InternalAssistantThreadClient.cs +++ b/src/Generated/InternalAssistantThreadClient.cs @@ -18,8 +18,6 @@ internal partial class InternalAssistantThreadClient private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - public virtual ClientPipeline Pipeline => _pipeline; - protected InternalAssistantThreadClient() { } diff --git a/src/Generated/InternalUploadsClient.cs b/src/Generated/InternalUploadsClient.cs index 4c6d033a..afa20147 100644 --- a/src/Generated/InternalUploadsClient.cs +++ b/src/Generated/InternalUploadsClient.cs @@ -18,8 +18,6 @@ internal partial class InternalUploadsClient private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - public virtual ClientPipeline Pipeline => _pipeline; - protected InternalUploadsClient() { } diff --git a/src/Generated/LegacyCompletionClient.cs b/src/Generated/LegacyCompletionClient.cs index efb44faa..6ce31040 100644 --- a/src/Generated/LegacyCompletionClient.cs +++ b/src/Generated/LegacyCompletionClient.cs @@ -18,8 +18,6 @@ internal partial class LegacyCompletionClient private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - public virtual ClientPipeline Pipeline => _pipeline; - protected LegacyCompletionClient() { } diff --git a/src/Generated/ModelClient.cs b/src/Generated/ModelClient.cs index 9771fb0d..432d1e24 100644 --- a/src/Generated/ModelClient.cs +++ b/src/Generated/ModelClient.cs @@ -18,8 +18,6 @@ public partial class ModelClient private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - public virtual ClientPipeline Pipeline => _pipeline; - protected ModelClient() { } diff --git a/src/Generated/Models/RunCreationOptions.Serialization.cs b/src/Generated/Models/RunCreationOptions.Serialization.cs index 1e534942..2b255bd4 100644 --- a/src/Generated/Models/RunCreationOptions.Serialization.cs +++ b/src/Generated/Models/RunCreationOptions.Serialization.cs @@ -150,24 +150,24 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWrit writer.WriteNull("stream"); } } - if (SerializedAdditionalRawData?.ContainsKey("max_prompt_tokens") != true && Optional.IsDefined(MaxPromptTokens)) + if (SerializedAdditionalRawData?.ContainsKey("max_prompt_tokens") != true && Optional.IsDefined(MaxInputTokenCount)) { - if (MaxPromptTokens != null) + if (MaxInputTokenCount != null) { writer.WritePropertyName("max_prompt_tokens"u8); - writer.WriteNumberValue(MaxPromptTokens.Value); + writer.WriteNumberValue(MaxInputTokenCount.Value); } else { writer.WriteNull("max_prompt_tokens"); } } - if (SerializedAdditionalRawData?.ContainsKey("max_completion_tokens") != true && Optional.IsDefined(MaxCompletionTokens)) + if (SerializedAdditionalRawData?.ContainsKey("max_completion_tokens") != true && Optional.IsDefined(MaxOutputTokenCount)) { - if (MaxCompletionTokens != 
null) + if (MaxOutputTokenCount != null) { writer.WritePropertyName("max_completion_tokens"u8); - writer.WriteNumberValue(MaxCompletionTokens.Value); + writer.WriteNumberValue(MaxOutputTokenCount.Value); } else { diff --git a/src/Generated/Models/RunCreationOptions.cs b/src/Generated/Models/RunCreationOptions.cs index 7205b3c9..d0cc3330 100644 --- a/src/Generated/Models/RunCreationOptions.cs +++ b/src/Generated/Models/RunCreationOptions.cs @@ -11,7 +11,7 @@ public partial class RunCreationOptions { internal IDictionary SerializedAdditionalRawData { get; set; } - internal RunCreationOptions(string assistantId, string modelOverride, string instructionsOverride, string additionalInstructions, IList internalMessages, IList toolsOverride, IDictionary metadata, float? temperature, float? nucleusSamplingFactor, bool? stream, int? maxPromptTokens, int? maxCompletionTokens, RunTruncationStrategy truncationStrategy, ToolConstraint toolConstraint, bool? allowParallelToolCalls, AssistantResponseFormat responseFormat, IDictionary serializedAdditionalRawData) + internal RunCreationOptions(string assistantId, string modelOverride, string instructionsOverride, string additionalInstructions, IList internalMessages, IList toolsOverride, IDictionary metadata, float? temperature, float? nucleusSamplingFactor, bool? stream, int? maxInputTokenCount, int? maxOutputTokenCount, RunTruncationStrategy truncationStrategy, ToolConstraint toolConstraint, bool? allowParallelToolCalls, AssistantResponseFormat responseFormat, IDictionary serializedAdditionalRawData) { AssistantId = assistantId; ModelOverride = modelOverride; @@ -23,8 +23,8 @@ internal RunCreationOptions(string assistantId, string modelOverride, string ins Temperature = temperature; NucleusSamplingFactor = nucleusSamplingFactor; Stream = stream; - MaxPromptTokens = maxPromptTokens; - MaxCompletionTokens = maxCompletionTokens; + MaxInputTokenCount = maxInputTokenCount; + MaxOutputTokenCount = maxOutputTokenCount; TruncationStrategy = truncationStrategy; ToolConstraint = toolConstraint; AllowParallelToolCalls = allowParallelToolCalls; diff --git a/src/Generated/Models/RunIncompleteReason.cs b/src/Generated/Models/RunIncompleteReason.cs index b0baa9e8..28cdbd67 100644 --- a/src/Generated/Models/RunIncompleteReason.cs +++ b/src/Generated/Models/RunIncompleteReason.cs @@ -16,11 +16,8 @@ public RunIncompleteReason(string value) _value = value ?? 
throw new ArgumentNullException(nameof(value)); } - private const string MaxCompletionTokensValue = "max_completion_tokens"; - private const string MaxPromptTokensValue = "max_prompt_tokens"; - - public static RunIncompleteReason MaxCompletionTokens { get; } = new RunIncompleteReason(MaxCompletionTokensValue); - public static RunIncompleteReason MaxPromptTokens { get; } = new RunIncompleteReason(MaxPromptTokensValue); + private const string MaxOutputTokenCountValue = "max_completion_tokens"; + private const string MaxInputTokenCountValue = "max_prompt_tokens"; public static bool operator ==(RunIncompleteReason left, RunIncompleteReason right) => left.Equals(right); public static bool operator !=(RunIncompleteReason left, RunIncompleteReason right) => !left.Equals(right); public static implicit operator RunIncompleteReason(string value) => new RunIncompleteReason(value); diff --git a/src/Generated/Models/RunStepTokenUsage.Serialization.cs b/src/Generated/Models/RunStepTokenUsage.Serialization.cs index eed20e75..e3d81cbd 100644 --- a/src/Generated/Models/RunStepTokenUsage.Serialization.cs +++ b/src/Generated/Models/RunStepTokenUsage.Serialization.cs @@ -24,17 +24,17 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWrite if (SerializedAdditionalRawData?.ContainsKey("completion_tokens") != true) { writer.WritePropertyName("completion_tokens"u8); - writer.WriteNumberValue(CompletionTokens); + writer.WriteNumberValue(OutputTokenCount); } if (SerializedAdditionalRawData?.ContainsKey("prompt_tokens") != true) { writer.WritePropertyName("prompt_tokens"u8); - writer.WriteNumberValue(PromptTokens); + writer.WriteNumberValue(InputTokenCount); } if (SerializedAdditionalRawData?.ContainsKey("total_tokens") != true) { writer.WritePropertyName("total_tokens"u8); - writer.WriteNumberValue(TotalTokens); + writer.WriteNumberValue(TotalTokenCount); } if (SerializedAdditionalRawData != null) { diff --git a/src/Generated/Models/RunStepTokenUsage.cs b/src/Generated/Models/RunStepTokenUsage.cs index 3e1e8193..34e6768b 100644 --- a/src/Generated/Models/RunStepTokenUsage.cs +++ b/src/Generated/Models/RunStepTokenUsage.cs @@ -10,27 +10,23 @@ namespace OpenAI.Assistants public partial class RunStepTokenUsage { internal IDictionary SerializedAdditionalRawData { get; set; } - internal RunStepTokenUsage(int completionTokens, int promptTokens, int totalTokens) + internal RunStepTokenUsage(int outputTokenCount, int inputTokenCount, int totalTokenCount) { - CompletionTokens = completionTokens; - PromptTokens = promptTokens; - TotalTokens = totalTokens; + OutputTokenCount = outputTokenCount; + InputTokenCount = inputTokenCount; + TotalTokenCount = totalTokenCount; } - internal RunStepTokenUsage(int completionTokens, int promptTokens, int totalTokens, IDictionary serializedAdditionalRawData) + internal RunStepTokenUsage(int outputTokenCount, int inputTokenCount, int totalTokenCount, IDictionary serializedAdditionalRawData) { - CompletionTokens = completionTokens; - PromptTokens = promptTokens; - TotalTokens = totalTokens; + OutputTokenCount = outputTokenCount; + InputTokenCount = inputTokenCount; + TotalTokenCount = totalTokenCount; SerializedAdditionalRawData = serializedAdditionalRawData; } internal RunStepTokenUsage() { } - - public int CompletionTokens { get; } - public int PromptTokens { get; } - public int TotalTokens { get; } } } diff --git a/src/Generated/Models/ThreadRun.Serialization.cs b/src/Generated/Models/ThreadRun.Serialization.cs index 6f264303..d5cb1a2c 100644 --- 
a/src/Generated/Models/ThreadRun.Serialization.cs +++ b/src/Generated/Models/ThreadRun.Serialization.cs @@ -223,10 +223,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions } if (SerializedAdditionalRawData?.ContainsKey("max_prompt_tokens") != true) { - if (MaxPromptTokens != null) + if (MaxInputTokenCount != null) { writer.WritePropertyName("max_prompt_tokens"u8); - writer.WriteNumberValue(MaxPromptTokens.Value); + writer.WriteNumberValue(MaxInputTokenCount.Value); } else { @@ -235,10 +235,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions } if (SerializedAdditionalRawData?.ContainsKey("max_completion_tokens") != true) { - if (MaxCompletionTokens != null) + if (MaxOutputTokenCount != null) { writer.WritePropertyName("max_completion_tokens"u8); - writer.WriteNumberValue(MaxCompletionTokens.Value); + writer.WriteNumberValue(MaxOutputTokenCount.Value); } else { diff --git a/src/Generated/Models/ThreadRun.cs b/src/Generated/Models/ThreadRun.cs index f1998480..65ab7b08 100644 --- a/src/Generated/Models/ThreadRun.cs +++ b/src/Generated/Models/ThreadRun.cs @@ -12,7 +12,7 @@ public partial class ThreadRun { internal IDictionary SerializedAdditionalRawData { get; set; } - internal ThreadRun(string id, InternalRunObjectObject @object, DateTimeOffset createdAt, string threadId, string assistantId, RunStatus status, InternalRunRequiredAction internalRequiredAction, RunError lastError, DateTimeOffset? expiresAt, DateTimeOffset? startedAt, DateTimeOffset? cancelledAt, DateTimeOffset? failedAt, DateTimeOffset? completedAt, RunIncompleteDetails incompleteDetails, string model, string instructions, IReadOnlyList tools, IReadOnlyDictionary metadata, RunTokenUsage usage, float? temperature, float? nucleusSamplingFactor, int? maxPromptTokens, int? maxCompletionTokens, RunTruncationStrategy truncationStrategy, ToolConstraint toolConstraint, bool? allowParallelToolCalls, AssistantResponseFormat responseFormat, IDictionary serializedAdditionalRawData) + internal ThreadRun(string id, InternalRunObjectObject @object, DateTimeOffset createdAt, string threadId, string assistantId, RunStatus status, InternalRunRequiredAction internalRequiredAction, RunError lastError, DateTimeOffset? expiresAt, DateTimeOffset? startedAt, DateTimeOffset? cancelledAt, DateTimeOffset? failedAt, DateTimeOffset? completedAt, RunIncompleteDetails incompleteDetails, string model, string instructions, IReadOnlyList tools, IReadOnlyDictionary metadata, RunTokenUsage usage, float? temperature, float? nucleusSamplingFactor, int? maxInputTokenCount, int? maxOutputTokenCount, RunTruncationStrategy truncationStrategy, ToolConstraint toolConstraint, bool? allowParallelToolCalls, AssistantResponseFormat responseFormat, IDictionary serializedAdditionalRawData) { Id = id; Object = @object; @@ -35,8 +35,8 @@ internal ThreadRun(string id, InternalRunObjectObject @object, DateTimeOffset cr Usage = usage; Temperature = temperature; NucleusSamplingFactor = nucleusSamplingFactor; - MaxPromptTokens = maxPromptTokens; - MaxCompletionTokens = maxCompletionTokens; + MaxInputTokenCount = maxInputTokenCount; + MaxOutputTokenCount = maxOutputTokenCount; TruncationStrategy = truncationStrategy; ToolConstraint = toolConstraint; AllowParallelToolCalls = allowParallelToolCalls; @@ -67,8 +67,6 @@ internal ThreadRun() public IReadOnlyDictionary Metadata { get; } public RunTokenUsage Usage { get; } public float? Temperature { get; } - public int? MaxPromptTokens { get; } - public int? 
MaxCompletionTokens { get; } public RunTruncationStrategy TruncationStrategy { get; } } } diff --git a/src/Generated/ModerationClient.cs b/src/Generated/ModerationClient.cs index 9eac7994..5cfe3128 100644 --- a/src/Generated/ModerationClient.cs +++ b/src/Generated/ModerationClient.cs @@ -18,8 +18,6 @@ public partial class ModerationClient private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - public virtual ClientPipeline Pipeline => _pipeline; - protected ModerationClient() { } diff --git a/src/Generated/OpenAIClient.cs b/src/Generated/OpenAIClient.cs index 6b71023a..6d35313c 100644 --- a/src/Generated/OpenAIClient.cs +++ b/src/Generated/OpenAIClient.cs @@ -30,8 +30,6 @@ public partial class OpenAIClient private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - public virtual ClientPipeline Pipeline => _pipeline; - protected OpenAIClient() { } diff --git a/src/Generated/OpenAIModelFactory.cs b/src/Generated/OpenAIModelFactory.cs index 50039e67..2e0e0aaf 100644 --- a/src/Generated/OpenAIModelFactory.cs +++ b/src/Generated/OpenAIModelFactory.cs @@ -53,9 +53,9 @@ public static RunStepError RunStepError(RunStepErrorCode code = default, string return new RunStepError(code, message, serializedAdditionalRawData: null); } - public static RunStepTokenUsage RunStepTokenUsage(int completionTokens = default, int promptTokens = default, int totalTokens = default) + public static RunStepTokenUsage RunStepTokenUsage(int outputTokenCount = default, int inputTokenCount = default, int totalTokenCount = default) { - return new RunStepTokenUsage(completionTokens, promptTokens, totalTokens, serializedAdditionalRawData: null); + return new RunStepTokenUsage(outputTokenCount, inputTokenCount, totalTokenCount, serializedAdditionalRawData: null); } public static ModerationResultCollection ModerationResultCollection(string id = null, string model = null, IEnumerable results = null) diff --git a/src/Generated/VectorStoreClient.cs b/src/Generated/VectorStoreClient.cs index 5e44c7ab..2d597f0c 100644 --- a/src/Generated/VectorStoreClient.cs +++ b/src/Generated/VectorStoreClient.cs @@ -18,8 +18,6 @@ public partial class VectorStoreClient private readonly ClientPipeline _pipeline; private readonly Uri _endpoint; - public virtual ClientPipeline Pipeline => _pipeline; - protected VectorStoreClient() { } diff --git a/tests/Chat/OpenAIChatModelFactoryTests.cs b/tests/Chat/OpenAIChatModelFactoryTests.cs index 5bb1747f..ea8ecce6 100644 --- a/tests/Chat/OpenAIChatModelFactoryTests.cs +++ b/tests/Chat/OpenAIChatModelFactoryTests.cs @@ -326,7 +326,7 @@ public void ChatCompletionWithSystemFingerprintWorks() [Test] public void ChatCompletionWithUsageWorks() { - ChatTokenUsage usage = OpenAIChatModelFactory.ChatTokenUsage(outputTokens: 20); + ChatTokenUsage usage = OpenAIChatModelFactory.ChatTokenUsage(outputTokenCount: 20); ChatCompletion chatCompletion = OpenAIChatModelFactory.ChatCompletion(usage: usage); Assert.That(chatCompletion.Id, Is.Null); @@ -465,7 +465,7 @@ public void ChatTokenUsageWithNoPropertiesWorks() public void ChatTokenUsageWithOutputTokensWorks() { int outputTokens = 271828; - ChatTokenUsage chatTokenUsage = OpenAIChatModelFactory.ChatTokenUsage(outputTokens: outputTokens); + ChatTokenUsage chatTokenUsage = OpenAIChatModelFactory.ChatTokenUsage(outputTokenCount: outputTokens); Assert.That(chatTokenUsage.OutputTokenCount, Is.EqualTo(outputTokens)); Assert.That(chatTokenUsage.InputTokenCount, Is.EqualTo(0)); @@ -476,7 +476,7 @@ public void 
ChatTokenUsageWithOutputTokensWorks() public void ChatTokenUsageWithInputTokensWorks() { int inputTokens = 271828; - ChatTokenUsage chatTokenUsage = OpenAIChatModelFactory.ChatTokenUsage(inputTokens: inputTokens); + ChatTokenUsage chatTokenUsage = OpenAIChatModelFactory.ChatTokenUsage(inputTokenCount: inputTokens); Assert.That(chatTokenUsage.OutputTokenCount, Is.EqualTo(0)); Assert.That(chatTokenUsage.InputTokenCount, Is.EqualTo(inputTokens)); @@ -487,7 +487,7 @@ public void ChatTokenUsageWithInputTokensWorks() public void ChatTokenUsageWithTotalTokensWorks() { int totalTokens = 271828; - ChatTokenUsage chatTokenUsage = OpenAIChatModelFactory.ChatTokenUsage(totalTokens: totalTokens); + ChatTokenUsage chatTokenUsage = OpenAIChatModelFactory.ChatTokenUsage(totalTokenCount: totalTokens); Assert.That(chatTokenUsage.OutputTokenCount, Is.EqualTo(0)); Assert.That(chatTokenUsage.InputTokenCount, Is.EqualTo(0)); @@ -809,7 +809,7 @@ public void StreamingChatCompletionUpdateWithSystemFingerprintWorks() [Test] public void StreamingChatCompletionUpdateWithUsageWorks() { - ChatTokenUsage usage = OpenAIChatModelFactory.ChatTokenUsage(outputTokens: 20); + ChatTokenUsage usage = OpenAIChatModelFactory.ChatTokenUsage(outputTokenCount: 20); StreamingChatCompletionUpdate streamingChatCompletionUpdate = OpenAIChatModelFactory.StreamingChatCompletionUpdate(usage: usage); Assert.That(streamingChatCompletionUpdate.Id, Is.Null); diff --git a/tests/Embeddings/OpenAIEmbeddingsModelFactoryTests.cs b/tests/Embeddings/OpenAIEmbeddingsModelFactoryTests.cs index a9a38a25..2ff6bfca 100644 --- a/tests/Embeddings/OpenAIEmbeddingsModelFactoryTests.cs +++ b/tests/Embeddings/OpenAIEmbeddingsModelFactoryTests.cs @@ -77,7 +77,7 @@ public void EmbeddingCollectionWithModelWorks() [Test] public void EmbeddingCollectionWithUsageWorks() { - EmbeddingTokenUsage usage = OpenAIEmbeddingsModelFactory.EmbeddingTokenUsage(inputTokens: 10); + EmbeddingTokenUsage usage = OpenAIEmbeddingsModelFactory.EmbeddingTokenUsage(inputTokenCount: 10); OpenAIEmbeddingCollection embeddingCollection = OpenAIEmbeddingsModelFactory.OpenAIEmbeddingCollection(usage: usage); Assert.That(embeddingCollection.Count, Is.EqualTo(0)); @@ -98,7 +98,7 @@ public void EmbeddingTokenUsageWithNoPropertiesWorks() public void EmbeddingTokenUsageWithInputTokensWorks() { int inputTokens = 10; - EmbeddingTokenUsage embeddingTokenUsage = OpenAIEmbeddingsModelFactory.EmbeddingTokenUsage(inputTokens: inputTokens); + EmbeddingTokenUsage embeddingTokenUsage = OpenAIEmbeddingsModelFactory.EmbeddingTokenUsage(inputTokenCount: inputTokens); Assert.That(embeddingTokenUsage.InputTokenCount, Is.EqualTo(10)); Assert.That(embeddingTokenUsage.TotalTokenCount, Is.EqualTo(default(int))); @@ -108,7 +108,7 @@ public void EmbeddingTokenUsageWithInputTokensWorks() public void EmbeddingTokenUsageWithTotalTokensWorks() { int totalTokens = 10; - EmbeddingTokenUsage embeddingTokenUsage = OpenAIEmbeddingsModelFactory.EmbeddingTokenUsage(totalTokens: totalTokens); + EmbeddingTokenUsage embeddingTokenUsage = OpenAIEmbeddingsModelFactory.EmbeddingTokenUsage(totalTokenCount: totalTokens); Assert.That(embeddingTokenUsage.InputTokenCount, Is.EqualTo(default(int))); Assert.That(embeddingTokenUsage.TotalTokenCount, Is.EqualTo(totalTokens));